From d68c55fd38fa0c6c17fd4bc20ad6b296d60d513d Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 8 Dec 2025 15:26:09 -0300 Subject: [PATCH 01/47] chore: added native http2 protocol support --- grpc_core/lib/grpc/codec/proto.ex | 15 +- grpc_core/lib/grpc/http2/errors.ex | 54 +++++++ grpc_core/lib/grpc/http2/flow_control.ex | 44 ++++++ grpc_core/lib/grpc/http2/frame.ex | 103 +++++++++++++ .../lib/grpc/http2/frame/continuation.ex | 57 +++++++ grpc_core/lib/grpc/http2/frame/data.ex | 78 ++++++++++ grpc_core/lib/grpc/http2/frame/goaway.ex | 35 +++++ grpc_core/lib/grpc/http2/frame/headers.ex | 140 ++++++++++++++++++ grpc_core/lib/grpc/http2/frame/ping.ex | 44 ++++++ grpc_core/lib/grpc/http2/frame/priority.ex | 50 +++++++ .../lib/grpc/http2/frame/push_promise.ex | 69 +++++++++ grpc_core/lib/grpc/http2/frame/rst_stream.ex | 33 +++++ grpc_core/lib/grpc/http2/frame/settings.ex | 114 ++++++++++++++ grpc_core/lib/grpc/http2/frame/unknown.ex | 27 ++++ .../lib/grpc/http2/frame/window_update.ex | 32 ++++ grpc_core/lib/grpc/http2/settings.ex | 20 +++ grpc_core/lib/grpc/message.ex | 49 +++--- 17 files changed, 936 insertions(+), 28 deletions(-) create mode 100644 grpc_core/lib/grpc/http2/errors.ex create mode 100644 grpc_core/lib/grpc/http2/flow_control.ex create mode 100644 grpc_core/lib/grpc/http2/frame.ex create mode 100644 grpc_core/lib/grpc/http2/frame/continuation.ex create mode 100644 grpc_core/lib/grpc/http2/frame/data.ex create mode 100644 grpc_core/lib/grpc/http2/frame/goaway.ex create mode 100644 grpc_core/lib/grpc/http2/frame/headers.ex create mode 100644 grpc_core/lib/grpc/http2/frame/ping.ex create mode 100644 grpc_core/lib/grpc/http2/frame/priority.ex create mode 100644 grpc_core/lib/grpc/http2/frame/push_promise.ex create mode 100644 grpc_core/lib/grpc/http2/frame/rst_stream.ex create mode 100644 grpc_core/lib/grpc/http2/frame/settings.ex create mode 100644 grpc_core/lib/grpc/http2/frame/unknown.ex create mode 100644 
grpc_core/lib/grpc/http2/frame/window_update.ex create mode 100644 grpc_core/lib/grpc/http2/settings.ex diff --git a/grpc_core/lib/grpc/codec/proto.ex b/grpc_core/lib/grpc/codec/proto.ex index 6dd53136e..f140f74f2 100644 --- a/grpc_core/lib/grpc/codec/proto.ex +++ b/grpc_core/lib/grpc/codec/proto.ex @@ -1,15 +1,12 @@ defmodule GRPC.Codec.Proto do @behaviour GRPC.Codec - def name() do - "proto" - end + # Inline codec functions for better performance + @compile {:inline, name: 0, encode: 2, decode: 2} - def encode(struct, _opts \\ []) do - Protobuf.Encoder.encode_to_iodata(struct) - end + def name, do: "proto" - def decode(binary, module) do - module.decode(binary) - end + def encode(struct, _opts \\ []), do: Protobuf.Encoder.encode_to_iodata(struct) + + def decode(binary, module), do: module.decode(binary) end diff --git a/grpc_core/lib/grpc/http2/errors.ex b/grpc_core/lib/grpc/http2/errors.ex new file mode 100644 index 000000000..eed81935a --- /dev/null +++ b/grpc_core/lib/grpc/http2/errors.ex @@ -0,0 +1,54 @@ +defmodule GRPC.HTTP2.Errors do + @moduledoc false + # Errors as defined in RFC9113§7 + + @typedoc "An error code as defined for GOAWAY and RST_STREAM errors" + @type error_code() :: + (no_error :: 0x0) + | (protocol_error :: 0x1) + | (internal_error :: 0x2) + | (flow_control_error :: 0x3) + | (settings_timeout :: 0x4) + | (stream_closed :: 0x5) + | (frame_size_error :: 0x6) + | (refused_stream :: 0x7) + | (cancel :: 0x8) + | (compression_error :: 0x9) + | (connect_error :: 0xA) + | (enhance_your_calm :: 0xB) + | (inadequate_security :: 0xC) + | (http_1_1_requires :: 0xD) + + error_codes = %{ + no_error: 0x0, + protocol_error: 0x1, + internal_error: 0x2, + flow_control_error: 0x3, + settings_timeout: 0x4, + stream_closed: 0x5, + frame_size_error: 0x6, + refused_stream: 0x7, + cancel: 0x8, + compression_error: 0x9, + connect_error: 0xA, + enhance_your_calm: 0xB, + inadequate_security: 0xC, + http_1_1_requires: 0xD + } + + for {name, code} <- error_codes do + 
def unquote(name)(), do: unquote(code) + end + + defmodule ConnectionError do + @moduledoc false + + defexception message: nil, error_code: nil + end + + defmodule StreamError do + @moduledoc false + + defexception message: nil, error_code: nil, stream_id: nil + end +end diff --git a/grpc_core/lib/grpc/http2/flow_control.ex b/grpc_core/lib/grpc/http2/flow_control.ex new file mode 100644 index 000000000..5ac340fc7 --- /dev/null +++ b/grpc_core/lib/grpc/http2/flow_control.ex @@ -0,0 +1,44 @@ +defmodule GRPC.HTTP2.FlowControl do + @moduledoc false + # Helpers for working with flow control window calculations + + import Bitwise + + @max_window_increment (1 <<< 31) - 1 + @max_window_size (1 <<< 31) - 1 + @min_window_size 1 <<< 30 + + @spec compute_recv_window(non_neg_integer(), non_neg_integer()) :: + {non_neg_integer(), non_neg_integer()} + def compute_recv_window(recv_window_size, data_size) do + # This is what our window size will be after receiving data_size bytes + recv_window_size = recv_window_size - data_size + + if recv_window_size > @min_window_size do + # We have room to go before we need to update our window + {recv_window_size, 0} + else + # We want our new window to be as large as possible, but are limited by both the maximum size + # of a WINDOW_UPDATE frame (max_window_increment) and the maximum window size (max_window_size) + window_increment = + min(@max_window_increment, @max_window_size - recv_window_size) + + {recv_window_size + window_increment, window_increment} + end + end + + @doc """ + Updates window size by increment, ensuring it doesn't exceed maximum. 
+ """ + @spec update_window(non_neg_integer(), integer()) :: + {:ok, non_neg_integer()} | {:error, :flow_control_error} + def update_window(current_size, increment) do + new_size = current_size + increment + + if new_size > @max_window_size do + {:error, :flow_control_error} + else + {:ok, new_size} + end + end +end diff --git a/grpc_core/lib/grpc/http2/frame.ex b/grpc_core/lib/grpc/http2/frame.ex new file mode 100644 index 000000000..32ffe6eb7 --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame.ex @@ -0,0 +1,103 @@ +defmodule GRPC.HTTP2.Frame do + @moduledoc false + # HTTP/2 frame parsing and serialization adapted from Bandit + + @typedoc "Indicates a frame type" + @type frame_type :: non_neg_integer() + + @typedoc "The flags passed along with a frame" + @type flags :: byte() + + @typedoc "A valid HTTP/2 frame" + @type frame :: + GRPC.HTTP2.Frame.Data.t() + | GRPC.HTTP2.Frame.Headers.t() + | GRPC.HTTP2.Frame.Priority.t() + | GRPC.HTTP2.Frame.RstStream.t() + | GRPC.HTTP2.Frame.Settings.t() + | GRPC.HTTP2.Frame.Ping.t() + | GRPC.HTTP2.Frame.Goaway.t() + | GRPC.HTTP2.Frame.WindowUpdate.t() + | GRPC.HTTP2.Frame.Continuation.t() + | GRPC.HTTP2.Frame.Unknown.t() + + @spec deserialize(binary(), non_neg_integer()) :: + {{:ok, frame()}, iodata()} + | {{:more, iodata()}, <<>>} + | {{:error, GRPC.HTTP2.Errors.error_code(), binary()}, iodata()} + | nil + def deserialize( + <>, + max_frame_size + ) + when length <= max_frame_size do + type + |> case do + 0x0 -> GRPC.HTTP2.Frame.Data.deserialize(flags, stream_id, payload) + 0x1 -> GRPC.HTTP2.Frame.Headers.deserialize(flags, stream_id, payload) + 0x2 -> GRPC.HTTP2.Frame.Priority.deserialize(flags, stream_id, payload) + 0x3 -> GRPC.HTTP2.Frame.RstStream.deserialize(flags, stream_id, payload) + 0x4 -> GRPC.HTTP2.Frame.Settings.deserialize(flags, stream_id, payload) + 0x5 -> GRPC.HTTP2.Frame.PushPromise.deserialize(flags, stream_id, payload) + 0x6 -> GRPC.HTTP2.Frame.Ping.deserialize(flags, stream_id, payload) + 0x7 -> 
GRPC.HTTP2.Frame.Goaway.deserialize(flags, stream_id, payload) + 0x8 -> GRPC.HTTP2.Frame.WindowUpdate.deserialize(flags, stream_id, payload) + 0x9 -> GRPC.HTTP2.Frame.Continuation.deserialize(flags, stream_id, payload) + _unknown -> GRPC.HTTP2.Frame.Unknown.deserialize(type, flags, stream_id, payload) + end + |> case do + {:ok, frame} -> {{:ok, frame}, rest} + {:error, error_code, reason} -> {{:error, error_code, reason}, rest} + end + end + + def deserialize( + <>, + max_frame_size + ) + when length > max_frame_size do + {{:error, GRPC.HTTP2.Errors.frame_size_error(), "Payload size too large (RFC9113§4.2)"}, rest} + end + + # nil is used to indicate for Stream.unfold/2 that the frame deserialization is finished + def deserialize(<<>>, _max_frame_size) do + nil + end + + def deserialize(msg, _max_frame_size) do + {{:more, msg}, <<>>} + end + + defmodule Flags do + @moduledoc false + + import Bitwise + + defguard set?(flags, bit) when band(flags, bsl(1, bit)) != 0 + defguard clear?(flags, bit) when band(flags, bsl(1, bit)) == 0 + + @spec set([0..255]) :: 0..255 + def set([]), do: 0x0 + def set([bit | rest]), do: bor(bsl(1, bit), set(rest)) + end + + defprotocol Serializable do + @moduledoc false + + @spec serialize(any(), non_neg_integer()) :: [ + {GRPC.HTTP2.Frame.frame_type(), GRPC.HTTP2.Frame.flags(), + GRPC.HTTP2.Stream.stream_id(), iodata()} + ] + def serialize(frame, max_frame_size) + end + + @spec serialize(frame(), non_neg_integer()) :: iolist() + def serialize(frame, max_frame_size) do + frame + |> Serializable.serialize(max_frame_size) + |> Enum.map(fn {type, flags, stream_id, payload} -> + [<>, payload] + end) + end +end diff --git a/grpc_core/lib/grpc/http2/frame/continuation.ex b/grpc_core/lib/grpc/http2/frame/continuation.ex new file mode 100644 index 000000000..3dde8bc55 --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/continuation.ex @@ -0,0 +1,57 @@ +defmodule GRPC.HTTP2.Frame.Continuation do + @moduledoc false + + import GRPC.HTTP2.Frame.Flags + 
+ defstruct stream_id: nil, + end_headers: false, + fragment: nil + + @typedoc "An HTTP/2 CONTINUATION frame" + @type t :: %__MODULE__{ + stream_id: GRPC.HTTP2.Stream.stream_id(), + end_headers: boolean(), + fragment: iodata() + } + + @end_headers_bit 2 + + @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, GRPC.HTTP2.Errors.protocol_error(), + "CONTINUATION frame with zero stream_id (RFC9113§6.10)"} + end + + def deserialize(flags, stream_id, <>) do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_headers: set?(flags, @end_headers_bit), + fragment: fragment + }} + end + + defimpl GRPC.HTTP2.Frame.Serializable do + @end_headers_bit 2 + + def serialize(%GRPC.HTTP2.Frame.Continuation{} = frame, max_frame_size) do + fragment_length = IO.iodata_length(frame.fragment) + + if fragment_length <= max_frame_size do + [{0x9, set([@end_headers_bit]), frame.stream_id, frame.fragment}] + else + <> = + IO.iodata_to_binary(frame.fragment) + + [ + {0x9, 0x00, frame.stream_id, this_frame} + | GRPC.HTTP2.Frame.Serializable.serialize( + %GRPC.HTTP2.Frame.Continuation{stream_id: frame.stream_id, fragment: rest}, + max_frame_size + ) + ] + end + end + end +end diff --git a/grpc_core/lib/grpc/http2/frame/data.ex b/grpc_core/lib/grpc/http2/frame/data.ex new file mode 100644 index 000000000..29a7e05e3 --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/data.ex @@ -0,0 +1,78 @@ +defmodule GRPC.HTTP2.Frame.Data do + @moduledoc false + + import GRPC.HTTP2.Frame.Flags + + defstruct stream_id: nil, + end_stream: false, + data: nil + + @typedoc "An HTTP/2 DATA frame" + @type t :: %__MODULE__{ + stream_id: GRPC.HTTP2.Stream.stream_id(), + end_stream: boolean(), + data: iodata() + } + + @end_stream_bit 0 + @padding_bit 3 + + @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, 
GRPC.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, GRPC.HTTP2.Errors.protocol_error(), "DATA frame with zero stream_id (RFC9113§6.1)"} + end + + def deserialize(flags, stream_id, <>) + when set?(flags, @padding_bit) and byte_size(rest) >= padding_length do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + data: binary_part(rest, 0, byte_size(rest) - padding_length) + }} + end + + def deserialize(flags, stream_id, <>) when clear?(flags, @padding_bit) do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + data: data + }} + end + + def deserialize(flags, _stream_id, <<_padding_length::8, _rest::binary>>) + when set?(flags, @padding_bit) do + {:error, GRPC.HTTP2.Errors.protocol_error(), + "DATA frame with invalid padding length (RFC9113§6.1)"} + end + + defimpl GRPC.HTTP2.Frame.Serializable do + @end_stream_bit 0 + + def serialize(%GRPC.HTTP2.Frame.Data{} = frame, max_frame_size) do + data_length = IO.iodata_length(frame.data) + + if data_length <= max_frame_size do + flags = if frame.end_stream, do: [@end_stream_bit], else: [] + [{0x0, set(flags), frame.stream_id, frame.data}] + else + <> = + IO.iodata_to_binary(frame.data) + + [ + {0x0, 0x00, frame.stream_id, this_frame} + | GRPC.HTTP2.Frame.Serializable.serialize( + %GRPC.HTTP2.Frame.Data{ + stream_id: frame.stream_id, + end_stream: frame.end_stream, + data: rest + }, + max_frame_size + ) + ] + end + end + end +end diff --git a/grpc_core/lib/grpc/http2/frame/goaway.ex b/grpc_core/lib/grpc/http2/frame/goaway.ex new file mode 100644 index 000000000..cd4620ca7 --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/goaway.ex @@ -0,0 +1,35 @@ +defmodule GRPC.HTTP2.Frame.Goaway do + @moduledoc false + + defstruct last_stream_id: 0, error_code: 0, debug_data: <<>> + + @typedoc "An HTTP/2 GOAWAY frame" + @type t :: %__MODULE__{ + last_stream_id: GRPC.HTTP2.Stream.stream_id(), + error_code: 
GRPC.HTTP2.Errors.error_code(), + debug_data: iodata() + } + + @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + def deserialize( + _flags, + 0, + <<_reserved::1, last_stream_id::31, error_code::32, debug_data::binary>> + ) do + {:ok, + %__MODULE__{last_stream_id: last_stream_id, error_code: error_code, debug_data: debug_data}} + end + + def deserialize(_flags, stream_id, _payload) when stream_id != 0 do + {:error, GRPC.HTTP2.Errors.protocol_error(), + "GOAWAY frame with non-zero stream_id (RFC9113§6.8)"} + end + + defimpl GRPC.HTTP2.Frame.Serializable do + def serialize(%GRPC.HTTP2.Frame.Goaway{} = frame, _max_frame_size) do + payload = <<0::1, frame.last_stream_id::31, frame.error_code::32, frame.debug_data::binary>> + [{0x7, 0x0, 0, payload}] + end + end +end diff --git a/grpc_core/lib/grpc/http2/frame/headers.ex b/grpc_core/lib/grpc/http2/frame/headers.ex new file mode 100644 index 000000000..9661962d9 --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/headers.ex @@ -0,0 +1,140 @@ +defmodule GRPC.HTTP2.Frame.Headers do + @moduledoc false + + import GRPC.HTTP2.Frame.Flags + + defstruct stream_id: nil, + end_stream: false, + end_headers: false, + exclusive_dependency: false, + stream_dependency: nil, + weight: nil, + fragment: nil + + @typedoc "An HTTP/2 HEADERS frame" + @type t :: %__MODULE__{ + stream_id: GRPC.HTTP2.Stream.stream_id(), + end_stream: boolean(), + end_headers: boolean(), + exclusive_dependency: boolean(), + stream_dependency: GRPC.HTTP2.Stream.stream_id() | nil, + weight: non_neg_integer() | nil, + fragment: iodata() + } + + @end_stream_bit 0 + @end_headers_bit 2 + @padding_bit 3 + @priority_bit 5 + + @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, GRPC.HTTP2.Errors.protocol_error(), + 
"HEADERS frame with zero stream_id (RFC9113§6.2)"} + end + + # Padding and priority + def deserialize( + flags, + stream_id, + <> + ) + when set?(flags, @padding_bit) and set?(flags, @priority_bit) and + byte_size(rest) >= padding_length do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + end_headers: set?(flags, @end_headers_bit), + exclusive_dependency: exclusive_dependency == 0x01, + stream_dependency: stream_dependency, + weight: weight, + fragment: binary_part(rest, 0, byte_size(rest) - padding_length) + }} + end + + # Padding but not priority + def deserialize(flags, stream_id, <>) + when set?(flags, @padding_bit) and clear?(flags, @priority_bit) and + byte_size(rest) >= padding_length do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + end_headers: set?(flags, @end_headers_bit), + fragment: binary_part(rest, 0, byte_size(rest) - padding_length) + }} + end + + # Any other case where padding is set + def deserialize(flags, _stream_id, <<_padding_length::8, _rest::binary>>) + when set?(flags, @padding_bit) do + {:error, GRPC.HTTP2.Errors.protocol_error(), + "HEADERS frame with invalid padding length (RFC9113§6.2)"} + end + + def deserialize( + flags, + stream_id, + <> + ) + when set?(flags, @priority_bit) do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + end_headers: set?(flags, @end_headers_bit), + exclusive_dependency: exclusive_dependency == 0x01, + stream_dependency: stream_dependency, + weight: weight, + fragment: fragment + }} + end + + def deserialize(flags, stream_id, <>) + when clear?(flags, @priority_bit) and clear?(flags, @padding_bit) do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_stream: set?(flags, @end_stream_bit), + end_headers: set?(flags, @end_headers_bit), + fragment: fragment + }} + end + + defimpl GRPC.HTTP2.Frame.Serializable do + @end_stream_bit 0 + @end_headers_bit 2 + + def serialize( + 
%GRPC.HTTP2.Frame.Headers{ + exclusive_dependency: false, + stream_dependency: nil, + weight: nil + } = + frame, + max_frame_size + ) do + flags = if frame.end_stream, do: [@end_stream_bit], else: [] + + fragment_length = IO.iodata_length(frame.fragment) + + if fragment_length <= max_frame_size do + [{0x1, set([@end_headers_bit | flags]), frame.stream_id, frame.fragment}] + else + <> = + IO.iodata_to_binary(frame.fragment) + + [ + {0x1, set(flags), frame.stream_id, this_frame} + | GRPC.HTTP2.Frame.Serializable.serialize( + %GRPC.HTTP2.Frame.Continuation{stream_id: frame.stream_id, fragment: rest}, + max_frame_size + ) + ] + end + end + end +end diff --git a/grpc_core/lib/grpc/http2/frame/ping.ex b/grpc_core/lib/grpc/http2/frame/ping.ex new file mode 100644 index 000000000..552dfd8d2 --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/ping.ex @@ -0,0 +1,44 @@ +defmodule GRPC.HTTP2.Frame.Ping do + @moduledoc false + + import GRPC.HTTP2.Frame.Flags + + defstruct ack: false, payload: nil + + @typedoc "An HTTP/2 PING frame" + @type t :: %__MODULE__{ + ack: boolean(), + payload: iodata() + } + + @ack_bit 0 + + @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + def deserialize(flags, 0, <>) when set?(flags, @ack_bit) do + {:ok, %__MODULE__{ack: true, payload: payload}} + end + + def deserialize(flags, 0, <>) when clear?(flags, @ack_bit) do + {:ok, %__MODULE__{ack: false, payload: payload}} + end + + def deserialize(_flags, stream_id, _payload) when stream_id != 0 do + {:error, GRPC.HTTP2.Errors.protocol_error(), "Invalid stream ID in PING frame (RFC9113§6.7)"} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, GRPC.HTTP2.Errors.frame_size_error(), + "PING frame with invalid payload size (RFC9113§6.7)"} + end + + defimpl GRPC.HTTP2.Frame.Serializable do + @ack_bit 0 + + def serialize(%GRPC.HTTP2.Frame.Ping{ack: true} = frame, _max_frame_size), + do: 
[{0x6, set([@ack_bit]), 0, frame.payload}] + + def serialize(%GRPC.HTTP2.Frame.Ping{ack: false} = frame, _max_frame_size), + do: [{0x6, 0x0, 0, frame.payload}] + end +end diff --git a/grpc_core/lib/grpc/http2/frame/priority.ex b/grpc_core/lib/grpc/http2/frame/priority.ex new file mode 100644 index 000000000..73db02c72 --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/priority.ex @@ -0,0 +1,50 @@ +defmodule GRPC.HTTP2.Frame.Priority do + @moduledoc false + + defstruct stream_id: nil, + exclusive_dependency: false, + stream_dependency: nil, + weight: nil + + @typedoc "An HTTP/2 PRIORITY frame" + @type t :: %__MODULE__{ + stream_id: GRPC.HTTP2.Stream.stream_id(), + exclusive_dependency: boolean(), + stream_dependency: GRPC.HTTP2.Stream.stream_id(), + weight: non_neg_integer() + } + + @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, GRPC.HTTP2.Errors.protocol_error(), + "PRIORITY frame with zero stream_id (RFC9113§6.3)"} + end + + def deserialize( + _flags, + stream_id, + <> + ) do + {:ok, + %__MODULE__{ + stream_id: stream_id, + exclusive_dependency: exclusive_dependency == 0x01, + stream_dependency: stream_dependency, + weight: weight + }} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, GRPC.HTTP2.Errors.frame_size_error(), + "Invalid payload size in PRIORITY frame (RFC9113§6.3)"} + end + + defimpl GRPC.HTTP2.Frame.Serializable do + def serialize(%GRPC.HTTP2.Frame.Priority{} = frame, _max_frame_size) do + exclusive = if frame.exclusive_dependency, do: 0x01, else: 0x00 + payload = <> + [{0x2, 0x0, frame.stream_id, payload}] + end + end +end diff --git a/grpc_core/lib/grpc/http2/frame/push_promise.ex b/grpc_core/lib/grpc/http2/frame/push_promise.ex new file mode 100644 index 000000000..b5a0b681f --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/push_promise.ex @@ -0,0 +1,69 @@ +defmodule 
GRPC.HTTP2.Frame.PushPromise do + @moduledoc false + + import GRPC.HTTP2.Frame.Flags + + defstruct stream_id: nil, + end_headers: false, + promised_stream_id: nil, + fragment: nil + + @typedoc "An HTTP/2 PUSH_PROMISE frame" + @type t :: %__MODULE__{ + stream_id: GRPC.HTTP2.Stream.stream_id(), + end_headers: boolean(), + promised_stream_id: GRPC.HTTP2.Stream.stream_id(), + fragment: iodata() + } + + @end_headers_bit 2 + @padding_bit 3 + + @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, GRPC.HTTP2.Errors.protocol_error(), + "PUSH_PROMISE frame with zero stream_id (RFC9113§6.6)"} + end + + def deserialize( + flags, + stream_id, + <> + ) + when set?(flags, @padding_bit) and byte_size(rest) >= padding_length do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_headers: set?(flags, @end_headers_bit), + promised_stream_id: promised_stream_id, + fragment: binary_part(rest, 0, byte_size(rest) - padding_length) + }} + end + + def deserialize(flags, stream_id, <<_reserved::1, promised_stream_id::31, fragment::binary>>) + when clear?(flags, @padding_bit) do + {:ok, + %__MODULE__{ + stream_id: stream_id, + end_headers: set?(flags, @end_headers_bit), + promised_stream_id: promised_stream_id, + fragment: fragment + }} + end + + def deserialize(flags, _stream_id, <<_padding_length::8, _rest::binary>>) + when set?(flags, @padding_bit) do + {:error, GRPC.HTTP2.Errors.protocol_error(), + "PUSH_PROMISE frame with invalid padding length (RFC9113§6.6)"} + end + + defimpl GRPC.HTTP2.Frame.Serializable do + @end_headers_bit 2 + + def serialize(%GRPC.HTTP2.Frame.PushPromise{} = frame, _max_frame_size) do + payload = <<0::1, frame.promised_stream_id::31, frame.fragment::binary>> + [{0x5, set([@end_headers_bit]), frame.stream_id, payload}] + end + end +end diff --git a/grpc_core/lib/grpc/http2/frame/rst_stream.ex 
b/grpc_core/lib/grpc/http2/frame/rst_stream.ex new file mode 100644 index 000000000..499f8b6b5 --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/rst_stream.ex @@ -0,0 +1,33 @@ +defmodule GRPC.HTTP2.Frame.RstStream do + @moduledoc false + + defstruct stream_id: nil, error_code: nil + + @typedoc "An HTTP/2 RST_STREAM frame" + @type t :: %__MODULE__{ + stream_id: GRPC.HTTP2.Stream.stream_id(), + error_code: GRPC.HTTP2.Errors.error_code() + } + + @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, GRPC.HTTP2.Errors.protocol_error(), + "RST_STREAM frame with zero stream_id (RFC9113§6.4)"} + end + + def deserialize(_flags, stream_id, <>) do + {:ok, %__MODULE__{stream_id: stream_id, error_code: error_code}} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, GRPC.HTTP2.Errors.frame_size_error(), + "Invalid payload size in RST_STREAM frame (RFC9113§6.4)"} + end + + defimpl GRPC.HTTP2.Frame.Serializable do + def serialize(%GRPC.HTTP2.Frame.RstStream{} = frame, _max_frame_size) do + [{0x3, 0x0, frame.stream_id, <>}] + end + end +end diff --git a/grpc_core/lib/grpc/http2/frame/settings.ex b/grpc_core/lib/grpc/http2/frame/settings.ex new file mode 100644 index 000000000..dc52f5757 --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/settings.ex @@ -0,0 +1,114 @@ +defmodule GRPC.HTTP2.Frame.Settings do + @moduledoc false + + import GRPC.HTTP2.Frame.Flags + import Bitwise + + @max_window_size (1 <<< 31) - 1 + @min_frame_size 1 <<< 14 + @max_frame_size (1 <<< 24) - 1 + + defstruct ack: false, settings: nil + + @typedoc "An HTTP/2 SETTINGS frame" + @type t :: %__MODULE__{ack: true, settings: nil} | %__MODULE__{ack: false, settings: map()} + + @ack_bit 0 + + @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + 
def deserialize(flags, 0, payload) when clear?(flags, @ack_bit) do + payload + |> Stream.unfold(fn + <<>> -> nil + <> -> {{:ok, {setting, value}}, rest} + <> -> {{:error, rest}, <<>>} + end) + |> Enum.reduce_while({:ok, %{}}, fn + {:ok, {0x01, value}}, {:ok, acc} -> + {:cont, {:ok, Map.put(acc, :header_table_size, value)}} + + {:ok, {0x02, val}}, {:ok, acc} when val in [0x00, 0x01] -> + {:cont, {:ok, acc}} + + {:ok, {0x02, _value}}, {:ok, _acc} -> + {:halt, + {:error, GRPC.HTTP2.Errors.protocol_error(), "Invalid enable_push value (RFC9113§6.5)"}} + + {:ok, {0x03, value}}, {:ok, acc} -> + {:cont, {:ok, Map.put(acc, :max_concurrent_streams, value)}} + + {:ok, {0x04, value}}, {:ok, _acc} when value > @max_window_size -> + {:halt, + {:error, GRPC.HTTP2.Errors.flow_control_error(), "Invalid window_size (RFC9113§6.5)"}} + + {:ok, {0x04, value}}, {:ok, acc} -> + {:cont, {:ok, Map.put(acc, :initial_window_size, value)}} + + {:ok, {0x05, value}}, {:ok, _acc} when value < @min_frame_size -> + {:halt, + {:error, GRPC.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} + + {:ok, {0x05, value}}, {:ok, _acc} when value > @max_frame_size -> + {:halt, + {:error, GRPC.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} + + {:ok, {0x05, value}}, {:ok, acc} -> + {:cont, {:ok, Map.put(acc, :max_frame_size, value)}} + + {:ok, {0x06, value}}, {:ok, acc} -> + {:cont, {:ok, Map.put(acc, :max_header_list_size, value)}} + + {:ok, {_setting, _value}}, {:ok, acc} -> + {:cont, {:ok, acc}} + + {:error, _rest}, _acc -> + {:halt, + {:error, GRPC.HTTP2.Errors.frame_size_error(), "Invalid SETTINGS size (RFC9113§6.5)"}} + end) + |> case do + {:ok, settings} -> {:ok, %__MODULE__{ack: false, settings: settings}} + {:error, error_code, reason} -> {:error, error_code, reason} + end + end + + def deserialize(flags, 0, <<>>) when set?(flags, @ack_bit) do + {:ok, %__MODULE__{ack: true}} + end + + def deserialize(flags, 0, _payload) when set?(flags, @ack_bit) do + 
{:error, GRPC.HTTP2.Errors.frame_size_error(), + "SETTINGS ack frame with non-empty payload (RFC9113§6.5)"} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, GRPC.HTTP2.Errors.protocol_error(), "Invalid SETTINGS frame (RFC9113§6.5)"} + end + + defimpl GRPC.HTTP2.Frame.Serializable do + @ack_bit 0 + + def serialize(%GRPC.HTTP2.Frame.Settings{ack: true}, _max_frame_size), + do: [{0x4, set([@ack_bit]), 0, <<>>}] + + def serialize(%GRPC.HTTP2.Frame.Settings{ack: false} = frame, _max_frame_size) do + payload = + frame.settings + |> Enum.uniq_by(fn {setting, _} -> setting end) + |> Enum.map(fn + {:header_table_size, 4_096} -> <<>> + {:header_table_size, value} -> <<0x01::16, value::32>> + {:max_concurrent_streams, :infinity} -> <<>> + {:max_concurrent_streams, value} -> <<0x03::16, value::32>> + {:initial_window_size, 65_535} -> <<>> + {:initial_window_size, value} -> <<0x04::16, value::32>> + {:max_frame_size, 16_384} -> <<>> + {:max_frame_size, value} -> <<0x05::16, value::32>> + {:max_header_list_size, :infinity} -> <<>> + {:max_header_list_size, value} -> <<0x06::16, value::32>> + end) + + [{0x4, 0x0, 0, payload}] + end + end +end diff --git a/grpc_core/lib/grpc/http2/frame/unknown.ex b/grpc_core/lib/grpc/http2/frame/unknown.ex new file mode 100644 index 000000000..c6f36f19e --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/unknown.ex @@ -0,0 +1,27 @@ +defmodule GRPC.HTTP2.Frame.Unknown do + @moduledoc false + + defstruct type: nil, + flags: nil, + stream_id: nil, + payload: nil + + @typedoc "An HTTP/2 frame of unknown type" + @type t :: %__MODULE__{ + type: GRPC.HTTP2.Frame.frame_type(), + flags: GRPC.HTTP2.Frame.flags(), + stream_id: GRPC.HTTP2.Stream.stream_id(), + payload: iodata() + } + + # Note this is arity 4 + @spec deserialize( + GRPC.HTTP2.Frame.frame_type(), + GRPC.HTTP2.Frame.flags(), + GRPC.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} + def deserialize(type, flags, stream_id, payload) do + {:ok, %__MODULE__{type: type, 
flags: flags, stream_id: stream_id, payload: payload}} + end +end diff --git a/grpc_core/lib/grpc/http2/frame/window_update.ex b/grpc_core/lib/grpc/http2/frame/window_update.ex new file mode 100644 index 000000000..ac8adcfb5 --- /dev/null +++ b/grpc_core/lib/grpc/http2/frame/window_update.ex @@ -0,0 +1,32 @@ +defmodule GRPC.HTTP2.Frame.WindowUpdate do + @moduledoc false + + import Bitwise + + defstruct stream_id: nil, size_increment: nil + + @typedoc "An HTTP/2 WINDOW_UPDATE frame" + @type t :: %__MODULE__{ + stream_id: GRPC.HTTP2.Stream.stream_id(), + size_increment: non_neg_integer() + } + + @max_window_increment (1 <<< 31) - 1 + + @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, stream_id, <<_reserved::1, size_increment::31>>) + when size_increment > 0 and size_increment <= @max_window_increment do + {:ok, %__MODULE__{stream_id: stream_id, size_increment: size_increment}} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, GRPC.HTTP2.Errors.frame_size_error(), "Invalid WINDOW_UPDATE frame (RFC9113§6.9)"} + end + + defimpl GRPC.HTTP2.Frame.Serializable do + def serialize(%GRPC.HTTP2.Frame.WindowUpdate{} = frame, _max_frame_size) do + [{0x8, 0x0, frame.stream_id, <<0::1, frame.size_increment::31>>}] + end + end +end diff --git a/grpc_core/lib/grpc/http2/settings.ex b/grpc_core/lib/grpc/http2/settings.ex new file mode 100644 index 000000000..b0c015672 --- /dev/null +++ b/grpc_core/lib/grpc/http2/settings.ex @@ -0,0 +1,20 @@ +defmodule GRPC.HTTP2.Settings do + @moduledoc """ + Settings as defined in RFC9113§6.5.2 + """ + + defstruct header_table_size: 4_096, + max_concurrent_streams: :infinity, + initial_window_size: 65_535, + max_frame_size: 16_384, + max_header_list_size: :infinity + + @typedoc "A collection of settings as defined in RFC9113§6.5" + @type t :: %__MODULE__{ + header_table_size: non_neg_integer(), + 
max_concurrent_streams: non_neg_integer() | :infinity, + initial_window_size: non_neg_integer(), + max_frame_size: non_neg_integer(), + max_header_list_size: non_neg_integer() | :infinity + } +end diff --git a/grpc_core/lib/grpc/message.ex b/grpc_core/lib/grpc/message.ex index 03ac2e339..8fb95df6b 100644 --- a/grpc_core/lib/grpc/message.ex +++ b/grpc_core/lib/grpc/message.ex @@ -15,6 +15,9 @@ defmodule GRPC.Message do @max_message_length Bitwise.bsl(1, 32 - 1) + # Inline hot path functions + @compile {:inline, to_data: 2, from_data: 1} + @doc """ Transforms Protobuf data into a gRPC body binary. @@ -42,33 +45,41 @@ defmodule GRPC.Message do @spec to_data(iodata, keyword()) :: {:ok, iodata, non_neg_integer} | {:error, String.t()} def to_data(message, opts \\ []) do - compressor = opts[:compressor] - iolist = opts[:iolist] - codec = opts[:codec] max_length = opts[:max_message_length] || @max_message_length - {compress_flag, message} = - if compressor do - {1, compressor.compress(message)} - else - {0, message} + {compress_flag, compressed_message} = + case opts[:compressor] do + nil -> {0, message} + compressor -> {1, compressor.compress(message)} end - length = IO.iodata_length(message) + length = IO.iodata_length(compressed_message) + + case length > max_length do + true -> + {:error, "Encoded message is too large (#{length} bytes)"} + + false -> + result = [compress_flag, <>, compressed_message] - if length > max_length do - {:error, "Encoded message is too large (#{length} bytes)"} - else - result = [compress_flag, <>, message] + result = + case opts[:codec] do + nil -> + result - result = - if function_exported?(codec, :pack_for_channel, 1), - do: codec.pack_for_channel(result), - else: result + codec when is_atom(codec) -> + if function_exported?(codec, :pack_for_channel, 1), + do: codec.pack_for_channel(result), + else: result + end - result = if iolist, do: result, else: IO.iodata_to_binary(result) + result = + case opts[:iolist] do + true -> result + _ -> 
IO.iodata_to_binary(result) + end - {:ok, result, length + 5} + {:ok, result, length + 5} end end From abb9b71cebcce5d30fa21de9d9018e7f2fcff7a6 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 8 Dec 2025 15:26:43 -0300 Subject: [PATCH 02/47] feat: added thousand_island server adapter --- grpc_server/lib/grpc/server.ex | 2 +- .../lib/grpc/server/adapters/cowboy.ex | 3 + .../grpc/server/adapters/cowboy/handler.ex | 2 +- .../grpc/server/adapters/thousand_island.ex | 651 ++++++++++++ .../server/adapters/thousand_island/README.md | 236 +++++ .../adapters/thousand_island/handler.ex | 294 ++++++ .../adapters/thousand_island/http2.back | 275 +++++ grpc_server/lib/grpc/server/bidi_stream.ex | 168 +++ grpc_server/lib/grpc/server/cache.ex | 90 ++ .../lib/grpc/server/http2/connection.ex | 982 ++++++++++++++++++ .../lib/grpc/server/http2/dispatcher.ex | 821 +++++++++++++++ .../lib/grpc/server/http2/stream_state.ex | 360 +++++++ grpc_server/lib/grpc/server/stream.ex | 6 +- grpc_server/mix.exs | 2 + grpc_server/mix.lock | 1 + grpc_server/src/grpc_stream_h.erl | 10 +- .../server/adapters/thousand_island_test.exs | 70 ++ .../grpc/server/http2/connection_test.exs | 299 ++++++ .../test/grpc/server/http2/errors_test.exs | 79 ++ .../grpc/server/http2/flow_control_test.exs | 80 ++ .../test/grpc/server/http2/frame_test.exs | 260 +++++ .../test/grpc/server/http2/frame_test.exs.bak | 259 +++++ .../test/grpc/server/http2/settings_test.exs | 44 + 23 files changed, 4985 insertions(+), 9 deletions(-) create mode 100644 grpc_server/lib/grpc/server/adapters/thousand_island.ex create mode 100644 grpc_server/lib/grpc/server/adapters/thousand_island/README.md create mode 100644 grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex create mode 100644 grpc_server/lib/grpc/server/adapters/thousand_island/http2.back create mode 100644 grpc_server/lib/grpc/server/bidi_stream.ex create mode 100644 grpc_server/lib/grpc/server/cache.ex create mode 100644 
grpc_server/lib/grpc/server/http2/connection.ex create mode 100644 grpc_server/lib/grpc/server/http2/dispatcher.ex create mode 100644 grpc_server/lib/grpc/server/http2/stream_state.ex create mode 100644 grpc_server/test/grpc/server/adapters/thousand_island_test.exs create mode 100644 grpc_server/test/grpc/server/http2/connection_test.exs create mode 100644 grpc_server/test/grpc/server/http2/errors_test.exs create mode 100644 grpc_server/test/grpc/server/http2/flow_control_test.exs create mode 100644 grpc_server/test/grpc/server/http2/frame_test.exs create mode 100644 grpc_server/test/grpc/server/http2/frame_test.exs.bak create mode 100644 grpc_server/test/grpc/server/http2/settings_test.exs diff --git a/grpc_server/lib/grpc/server.ex b/grpc_server/lib/grpc/server.ex index 1e48c9261..57b2370d2 100644 --- a/grpc_server/lib/grpc/server.ex +++ b/grpc_server/lib/grpc/server.ex @@ -512,7 +512,7 @@ defmodule GRPC.Server do end @doc false - @spec servers_to_map(module() | [module()]) :: %{String.t() => [module()]} + @spec servers_to_map(module() | [module()]) :: %{String.t() => module()} def servers_to_map(servers) do Enum.reduce(List.wrap(servers), %{}, fn s, acc -> Map.put(acc, s.__meta__(:service).__meta__(:name), s) diff --git a/grpc_server/lib/grpc/server/adapters/cowboy.ex b/grpc_server/lib/grpc/server/adapters/cowboy.ex index 95ee0a2f3..42ffe56a2 100644 --- a/grpc_server/lib/grpc/server/adapters/cowboy.ex +++ b/grpc_server/lib/grpc/server/adapters/cowboy.ex @@ -89,6 +89,9 @@ defmodule GRPC.Server.Adapters.Cowboy do @spec start_link(atom(), atom(), %{String.t() => [module()]}, any()) :: {:ok, pid()} | {:error, any()} def start_link(scheme, endpoint, servers, {m, f, [ref | _] = a}) do + # Initialize ETS cache for codecs/compressors lookup + GRPC.Server.Cache.init() + case apply(m, f, a) do {:ok, pid} -> Logger.info(running_info(scheme, endpoint, servers, ref)) diff --git a/grpc_server/lib/grpc/server/adapters/cowboy/handler.ex 
b/grpc_server/lib/grpc/server/adapters/cowboy/handler.ex index bc596772d..8255fb2f3 100644 --- a/grpc_server/lib/grpc/server/adapters/cowboy/handler.ex +++ b/grpc_server/lib/grpc/server/adapters/cowboy/handler.ex @@ -450,7 +450,7 @@ defmodule GRPC.Server.Adapters.Cowboy.Handler do {:stop, req, state} else - case GRPC.Message.to_data(data, compressor: compressor, codec: opts[:codec]) do + case GRPC.Message.to_data(data, compressor: compressor, codec: opts[:codec], iolist: true) do {:ok, data, _size} -> req = check_sent_resp(req) :cowboy_req.stream_body(data, is_fin, req) diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island.ex b/grpc_server/lib/grpc/server/adapters/thousand_island.ex new file mode 100644 index 000000000..f581a67c0 --- /dev/null +++ b/grpc_server/lib/grpc/server/adapters/thousand_island.ex @@ -0,0 +1,651 @@ +defmodule GRPC.Server.Adapters.ThousandIsland do + @moduledoc """ + A server (`GRPC.Server.Adapter`) adapter using `:thousand_island`. + + ThousandIsland is a modern, pure Elixir socket server that provides: + - Built-in connection pooling + - Efficient resource management + - Better integration with Elixir/OTP ecosystem + - Simpler architecture than Cowboy/Ranch + + ## Advantages over Cowboy + + - **Pure Elixir**: No Erlang dependencies, better integration + - **Built-in pooling**: Native connection pool management + - **Lower overhead**: Simpler architecture, fewer layers + - **Modern design**: Built with current Elixir best practices + - **Telemetry integration**: First-class observability support + + ## Architecture & Process Model + + ### Module Responsibilities + + 1. **GRPC.Server.Adapters.ThousandIsland** (this module) + - Adapter API implementation (`GRPC.Server.Adapter` behaviour) + - Server lifecycle (start/stop) + - Helper functions: `send_reply/3`, `send_headers/2`, `send_trailers/2` + - These functions send async messages to the Handler process + + 2. 
**GRPC.Server.Adapters.ThousandIsland.Handler** + - ThousandIsland.Handler behaviour implementation + - HTTP/2 connection lifecycle + - Frame processing coordinator + - Message handling (async operations from user handlers) + - State management (accumulated headers, connection state) + + 3. **GRPC.Server.HTTP2.Connection** + - HTTP/2 protocol state machine + - Frame encoding/decoding (HEADERS, DATA, SETTINGS, etc.) + - HPACK compression/decompression + - Stream state management (per-stream tracking) + - Flow control + + 4. **GRPC.Server.HTTP2.StreamState** + - Per-stream state tracking + - Message buffering and framing + - gRPC message assembly (5-byte length-prefix framing) + - Stream lifecycle (idle -> open -> half_closed -> closed) + + 5. **GRPC.Server.HTTP2.Dispatcher** + - RPC method routing and dispatch + - Determines RPC type (unary, client_stream, server_stream, bidi_stream) + - Spawns handler tasks for streaming RPCs + - Manages BidiStream GenServer for bidirectional streaming + + 6. **GRPC.Server.BidiStream** + - GenServer for bidirectional streaming + - Message queue for incoming requests + - Lazy enumerable generation for handler consumption + - Backpressure management + + ## Request Pipeline by RPC Type + + ### Process Hierarchy + + ``` + ThousandIsland Supervisor + └── Handler Process (one per connection) + ├── Connection State (HTTP2.Connection) + │ └── Stream States (HTTP2.StreamState per stream_id) + └── User Handler Tasks (spawned per RPC) + └── BidiStream GenServer (only for bidi streaming) + ``` + + ### 1. Unary RPC (request -> response) + + #### Request Path + 1. **Client sends HTTP/2 frames** → TCP socket + 2. **Handler.handle_data/3** receives raw bytes + - Buffers until complete frames available + 3. **Connection.handle_frame/3** processes each frame + - HEADERS frame → decode headers, create StreamState + - DATA frame → accumulate in StreamState.data_buffer + - When END_STREAM received → decode gRPC message + 4. 
**Connection.process_grpc_request/4** extracts complete request + - Decodes 5-byte length-prefixed message + - Looks up RPC method from path + 5. **Dispatcher.dispatch/4** routes to handler + - Calls `Dispatcher.call_unary/5` + - Directly invokes user handler function: `MyServer.my_method(request, stream)` + - Handler runs **synchronously** in Handler process + + #### Response Path + 1. **Handler returns response** (or calls `GRPC.Server.send_reply/2`) + 2. **Dispatcher** sends response headers + data + trailers + - Headers: `{":status" => "200", "content-type" => "application/grpc+proto"}` + - Data: gRPC framed message (5-byte length + protobuf) + - Trailers: `{"grpc-status" => "0"}` + 3. **Connection.send_headers/4** encodes HEADERS frame + - HPACK compression + - Sends via socket + 4. **Connection.send_data/5** encodes DATA frame + - Sets END_STREAM flag + 5. **Connection.send_trailers/4** encodes final HEADERS frame + - Sets END_HEADERS + END_STREAM flags + + **Process Model**: Single Handler process handles entire request synchronously + + ### 2. Client Streaming RPC (stream of requests -> response) + + #### Request Path + 1. **Client sends multiple DATA frames** (END_STREAM on last) + 2. **Handler.handle_data/3** → **Connection.handle_frame/3** + - Each DATA frame appends to StreamState.data_buffer + - Messages accumulated in StreamState.message_buffer + 3. **When END_STREAM received** → **process_grpc_request/4** + - All messages decoded + 4. **Dispatcher.call_client_streaming/5** + - Creates `Stream.unfold` from buffered messages + - Calls handler: `MyServer.my_method(request_enum, stream)` + - Handler **synchronously** consumes stream + + #### Response Path + Same as Unary (single response at end) + + **Process Model**: Single Handler process, synchronous handler execution + + ### 3. Server Streaming RPC (request -> stream of responses) + + #### Request Path + Same as Unary (single request) + + #### Response Path + 1. 
**Handler calls `GRPC.Server.send_reply/2` multiple times** + 2. **This adapter's `send_reply/3`** sends async message: + - `send(handler_pid, {:grpc_send_data, stream_id, framed_data})` + 3. **Handler.handle_info/2** receives `:grpc_send_data` + - Calls `Connection.send_data/5` to send DATA frame + - Each call is a separate DATA frame (END_STREAM=false) + 4. **Final trailers** sent at end + - `GRPC.Server.send_trailers/2` → `{:grpc_send_trailers, ...}` + - Handler sends final HEADERS frame with END_STREAM + + **Process Model**: + - Handler spawns **Task** to run user handler asynchronously + - Handler process receives messages from Task and sends frames + - Task communicates via messages to Handler process + + ### 4. Bidirectional Streaming RPC (stream ↔ stream) + + This is the most complex case with multiple concurrent processes. + + #### Process Model + + ``` + Handler Process (#PID<0.545.0>) + ├── State: %{accumulated_headers: %{stream_id => headers}, ...} + ├── Receives: HTTP/2 frames from client + ├── Receives: {:grpc_send_data, ...} from User Task + └── Sends: HTTP/2 frames to client + + User Handler Task (#PID<0.XXX.0>) + ├── Runs: MyServer.full_duplex_call(request_enum, stream) + ├── Consumes: request_enum (lazy, pulls from BidiStream) + └── Sends: {:grpc_send_data, ...} messages to Handler + + BidiStream GenServer (#PID<0.YYY.0>) + ├── Queue: Buffered incoming requests + ├── Receives: {:add_message, msg} from Handler (when DATA arrives) + └── Provides: Lazy enumerable to User Task + ``` + + #### Request Path (Incoming) + + 1. **Client sends DATA frames** (multiple, no END_STREAM until done) + 2. **Handler.handle_data/3** → **Connection.handle_frame/3** + - Each DATA frame processed immediately + 3. **Connection.process_grpc_request/4** (on first HEADERS) + - Creates StreamState with `is_bidi_streaming: true` + - Calls **Dispatcher.call_bidi_streaming/5** + 4. **Dispatcher.call_bidi_streaming/5** (CRITICAL!) 
+ - **Starts BidiStream GenServer**: `{:ok, bidi_pid} = BidiStream.start_link(stream_id, [])` + - **Accumulates base headers** (don't send yet!): + ```elixir + base_headers = %{":status" => "200", "content-type" => "application/grpc+proto"} + GRPC.Server.set_headers(stream, base_headers) # Sends {:grpc_accumulate_headers, ...} + ``` + - **Spawns User Handler Task**: + ```elixir + request_enum = BidiStream.to_enum(bidi_pid) + Task.start(fn -> + MyServer.full_duplex_call(request_enum, stream) + end) + ``` + - **Stores bidi_pid in StreamState** for later DATA frames + - Returns `:streaming_done` (dispatcher exits, Handler continues) + 5. **Subsequent DATA frames** (while handler running) + - **Connection.handle_frame/3** receives DATA frame + - Decodes gRPC message + - **Sends to BidiStream**: `GenServer.cast(bidi_pid, {:add_message, message})` + - BidiStream queues message for handler consumption + 6. **User Handler consumes request_enum** + - `Enum.each(request_enum, fn req -> ... end)` + - Each iteration pulls from BidiStream (lazy, blocks if queue empty) + - BidiStream dequeues message and returns to handler + + #### Response Path (Outgoing) + + 1. **User Handler calls `GRPC.Server.send_reply/2`** + - Runs in User Task process + 2. **This adapter's `send_reply/3`**: + ```elixir + send(handler_pid, {:grpc_send_data, stream_id, framed_data}) + ``` + 3. **Handler.handle_info({:grpc_send_data, ...}, state)** + - **CRITICAL: Header accumulation pattern** + - Checks if accumulated headers exist for stream_id: + ```elixir + accumulated = Map.get(state.accumulated_headers, stream_id, %{}) + if map_size(accumulated) > 0 do + # First DATA frame - send accumulated headers first! + Connection.send_headers(socket, stream_id, accumulated, connection) + # Clear accumulated headers + state = %{state | accumulated_headers: Map.delete(..., stream_id)} + end + ``` + - Then sends DATA frame: + ```elixir + Connection.send_data(socket, stream_id, data, false, connection) + ``` + 4. 
**Custom metadata support**: + - If handler calls `GRPC.Server.send_headers/2`: + ```elixir + send(handler_pid, {:grpc_accumulate_headers, stream_id, headers}) + ``` + - **Handler.handle_info({:grpc_accumulate_headers, ...})**: + ```elixir + current = Map.get(state.accumulated_headers, stream_id, %{}) + updated = Map.merge(current, headers) + state = %{state | accumulated_headers: Map.put(..., stream_id, updated)} + ``` + - These headers are sent with FIRST DATA frame (see step 3) + 5. **Final trailers** (when handler finishes) + - `GRPC.Server.send_trailers/2` → `{:grpc_send_trailers, stream_id, trailers}` + - **Handler.handle_info({:grpc_send_trailers, ...})**: + - Checks for unsent accumulated headers (empty stream case): + ```elixir + if map_size(accumulated_headers) > 0 do + Connection.send_headers(...) # Send base headers first + end + Connection.send_trailers(...) # Then trailers with END_STREAM + ``` + + #### Critical Timing & Synchronization + + **Problem**: HTTP/2 requires HEADERS before DATA, but we need to: + 1. Allow handler to add custom headers (via `send_headers/2`) + 2. Send base headers (`:status`, `content-type`) + 3. All in FIRST HEADERS frame (can't send HEADERS twice) + + **Solution** (inspired by Cowboy's `set_resp_headers` pattern): + 1. **Dispatcher accumulates base headers** without sending: + - Sends `{:grpc_accumulate_headers, stream_id, base_headers}` message + - Handler stores in `state.accumulated_headers` + 2. **User handler can add custom headers** (optional): + - Calls `GRPC.Server.send_headers(stream, custom_headers)` + - Merges into accumulated headers in Handler state + 3. **First `send_reply` sends ALL accumulated headers**: + - Handler checks `accumulated_headers[stream_id]` + - Sends merged (base + custom) headers in SINGLE HEADERS frame + - Clears accumulated headers + - Then sends DATA frame + 4. 
**For empty streams** (no `send_reply` calls): + - `send_trailers` checks for unsent accumulated headers + - Sends headers before trailers + + **Why this works**: + - Handler process is single-threaded message loop + - Messages processed in order: accumulate_headers → send_data → send_trailers + - Accumulated headers guaranteed to be merged before first DATA + - User Task sends messages asynchronously, Handler serializes them + + ## Message Flow Diagram (Bidi Streaming) + + ``` + Client Handler Process User Task BidiStream + │ │ │ │ + ├─── HEADERS ────────────>│ │ │ + │ ├─ create StreamState │ │ + │ ├─ start BidiStream ───────┼──────────────────────>│ + │ ├─ accumulate headers │ │ + │ ├─ spawn Task ────────────>│ │ + │ │ ├─ request_enum │ + │ │ │ (lazy, blocks) │ + ├─── DATA(req1) ─────────>│ │ │ + │ ├─ decode message │ │ + │ ├─ add_message ────────────┼──────────────────────>│ + │ │ │<──── pull next ───────┤ + │ │ │ (req1) │ + │ │ ├─ process req1 │ + │ │ ├─ send_reply(resp1) │ + │ │<── :grpc_send_data ──────┤ │ + │ ├─ send headers (1st!) │ │ + │<─── HEADERS ────────────┤ │ │ + │<─── DATA(resp1) ────────┤ │ │ + ├─── DATA(req2) ─────────>│ │ │ + │ ├─ add_message ────────────┼──────────────────────>│ + │ │ │<──── pull next ───────┤ + │ │ │ (req2) │ + │ │ ├─ process req2 │ + │ │ ├─ send_reply(resp2) │ + │ │<── :grpc_send_data ──────┤ │ + │<─── DATA(resp2) ────────┤ │ │ + ├─── DATA (END_STREAM) ──>│ │ │ + │ ├─ finish stream ──────────┼──────────────────────>│ + │ │ │<──── nil (done) ──────┤ + │ │ ├─ handler finishes │ + │ │ ├─ send_trailers │ + │ │<── :grpc_send_trailers ──┤ │ + │<─── HEADERS(trailers) ──┤ │ │ + │ (END_STREAM) │ x x + ``` + + ## Key Design Patterns + + 1. **Async Message Passing**: User handlers send messages to Handler process + - Decouples user code from HTTP/2 frame management + - Handler serializes all socket writes (thread-safe) + + 2. 
**Lazy Enumerables**: Streaming requests use `Stream.unfold` + - Backpressure: handler blocks if no messages available + - Memory efficient: doesn't buffer entire stream + + 3. **Header Accumulation**: Inspired by Cowboy's `set_resp_headers` + - Accumulate headers in Handler state (not process dictionary!) + - Send on first DATA or trailers (whichever comes first) + - Allows custom headers while respecting HTTP/2 constraints + + 4. **GenServer Message Queue**: BidiStream acts as message buffer + - Decouples incoming frame rate from handler processing rate + - Natural backpressure via GenServer mailbox + + 5. **Process Dictionary for Stream Metadata**: + - Used in Dispatcher context: `Process.put({:bidi_stream_pid, stream_id}, pid)` + - Allows Connection to find BidiStream when DATA arrives + - Alternative to passing state through deep call stack + """ + + @behaviour GRPC.Server.Adapter + + require Logger + alias GRPC.Server.Adapters.ThousandIsland.Handler + + @default_num_acceptors 100 + @default_max_connections 16384 + + @doc """ + Starts a ThousandIsland server. 
+ + ## Options + * `:ip` - The IP to bind the server to (default: listen on all interfaces) + * `:port` - The port to listen on (required) + * `:num_acceptors` - Number of acceptor processes (default: 100) + * `:num_connections` - Maximum concurrent connections (default: 16384) + * `:transport_options` - Additional transport options to pass to ThousandIsland + """ + @impl true + def start(endpoint, servers, port, opts) do + # Initialize ETS cache for codecs/compressors lookup + GRPC.Server.Cache.init() + + # Ensure Task.Supervisor is started (for direct start_link calls outside supervision tree) + case Process.whereis(GRPC.Server.StreamTaskSupervisor) do + nil -> + {:ok, _} = Task.Supervisor.start_link(name: GRPC.Server.StreamTaskSupervisor) + + _pid -> + :ok + end + + server_opts = build_server_opts(endpoint, servers, port, opts) + + case ThousandIsland.start_link(server_opts) do + {:ok, pid} -> + # Get actual port (important when port=0 for random port) + actual_port = get_actual_port(pid, port) + {:ok, pid, actual_port} + + {:error, {:already_started, pid}} -> + Logger.warning("Failed to start #{servers_name(endpoint, servers)}: already started") + actual_port = get_actual_port(pid, port) + {:ok, pid, actual_port} + + {:error, :eaddrinuse} = error -> + Logger.error("Failed to start #{servers_name(endpoint, servers)}: port already in use") + error + + {:error, _} = error -> + error + end + end + + defp get_actual_port(pid, default_port) do + case ThousandIsland.listener_info(pid) do + {:ok, {_ip, actual_port}} -> actual_port + _ -> default_port + end + end + + @doc """ + Return a child_spec to start server under a supervisor. 
+ """ + @spec child_spec(atom(), %{String.t() => [module()]}, non_neg_integer(), Keyword.t()) :: + Supervisor.child_spec() + def child_spec(endpoint, servers, port, opts) do + server_opts = build_server_opts(endpoint, servers, port, opts) + + # Initialize ETS cache for codecs/compressors lookup + GRPC.Server.Cache.init() + + scheme = if cred_opts(opts), do: :https, else: :http + + Logger.info( + "Starting #{servers_name(endpoint, servers)} with ThousandIsland using #{scheme}://0.0.0.0:#{port}" + ) + + server_name = servers_name(endpoint, servers) + + # Create children for the supervisor + children = [ + {Task.Supervisor, name: GRPC.Server.StreamTaskSupervisor}, + %{ + id: :thousand_island, + start: {ThousandIsland, :start_link, [server_opts]}, + type: :supervisor, + restart: :permanent, + shutdown: :infinity + } + ] + + %{ + id: server_name, + start: {Supervisor, :start_link, [children, [strategy: :rest_for_one]]}, + type: :supervisor, + restart: :permanent, + shutdown: :infinity + } + end + + @impl true + def stop(_endpoint, _servers) do + # TODO: Implement proper shutdown of ThousandIsland server + # ThousandIsland.stop(server_pid) + :ok + end + + @spec read_body(GRPC.Server.Adapter.state()) :: {:ok, binary()} + def read_body(%{data: data}) do + # Data is already in payload, return it directly + {:ok, data} + end + + @spec reading_stream(GRPC.Server.Adapter.state()) :: Enumerable.t() + def reading_stream(%{data: data}) do + # Create a stream that yields the data once + Stream.unfold({data, false}, fn + {_, true} -> + nil + + {buffer, false} -> + case GRPC.Message.get_message(buffer) do + {message, rest} -> {message, {rest, false}} + _ -> nil + end + end) + end + + def set_headers(%{handler_pid: pid, stream_id: stream_id}, headers) do + # Send message to accumulate headers in handler state + send(pid, {:grpc_accumulate_headers, stream_id, headers}) + :ok + end + + def set_resp_trailers(%{handler_pid: _pid, stream_id: stream_id}, trailers) do + # Store in process 
dictionary (runs in handler context during dispatch) + current_custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) + Process.put({:grpc_custom_trailers, stream_id}, Map.merge(current_custom_trailers, trailers)) + :ok + end + + def get_headers(%{headers: headers}) do + # Return request headers from payload + headers + end + + def get_headers(%{connection: connection}) do + # Fallback: Return request headers from connection metadata + connection.metadata || %{} + end + + def get_peer(%{socket: socket}) do + case ThousandIsland.Socket.peername(socket) do + {:ok, {address, port}} -> + {:ok, {address, port}} + + error -> + error + end + end + + def get_cert(%{socket: socket}) do + # Get SSL certificate if available + case ThousandIsland.Socket.peercert(socket) do + {:ok, cert} -> {:ok, cert} + {:error, _} -> {:error, :no_peercert} + end + end + + def get_qs(_payload) do + # Query string not applicable for gRPC + "" + end + + def get_bindings(_payload) do + # Path bindings not applicable for gRPC + %{} + end + + def set_compressor(_payload, _compressor) do + # Compressor will be stored in connection state + :ok + end + + @impl true + def send_reply(%{handler_pid: pid, stream_id: stream_id}, data, opts) do + # Encode message with gRPC framing (compressed flag + length + data) + compressor = Keyword.get(opts, :compressor) + codec = Keyword.get(opts, :codec) + + case GRPC.Message.to_data(data, compressor: compressor, codec: codec, iolist: true) do + {:ok, framed_data, _size} -> + # Send data frame - handler will send accumulated headers first if needed + send(pid, {:grpc_send_data, stream_id, framed_data}) + :ok + + {:error, _msg} -> + :ok + end + end + + # Fallback for non-streaming (shouldn't happen but keeps compatibility) + def send_reply(_payload, _data, _opts), do: :ok + + @impl true + def send_headers(%{handler_pid: pid, stream_id: stream_id}, headers) do + # Send message to accumulate headers in handler state + # They will be sent on first 
send_reply call + send(pid, {:grpc_accumulate_headers, stream_id, headers}) + :ok + end + + # Fallback for non-streaming + def send_headers(_payload, _headers), do: :ok + + def send_trailers(%{handler_pid: pid, stream_id: stream_id}, trailers) do + # Send trailers for streaming to handler process + send(pid, {:grpc_send_trailers, stream_id, trailers}) + :ok + end + + defp build_server_opts(endpoint, servers, port, opts) do + adapter_opts = Keyword.get(opts, :adapter_opts, opts) + + num_acceptors = Keyword.get(adapter_opts, :num_acceptors, @default_num_acceptors) + num_connections = Keyword.get(adapter_opts, :num_connections, @default_max_connections) + + transport_opts = + adapter_opts + |> Keyword.get(:transport_options, []) + |> Keyword.put(:port, port) + |> maybe_add_ip(adapter_opts) + |> maybe_add_ssl(cred_opts(opts)) + # Optimize TCP buffers for gRPC performance (support up to 1MB messages) + # 1MB buffer for large messages + |> Keyword.put_new(:buffer, 1_048_576) + # 1MB receive buffer + |> Keyword.put_new(:recbuf, 1_048_576) + # 1MB send buffer + |> Keyword.put_new(:sndbuf, 1_048_576) + # Disable Nagle's algorithm for low latency + |> Keyword.put_new(:nodelay, true) + + # Configure HTTP/2 settings for larger frames (needed for large gRPC messages) + local_settings = [ + # 1MB window size for large payloads + initial_window_size: 1_048_576, + # Keep default max frame size + max_frame_size: 16_384 + ] + + handler_options = %{ + endpoint: endpoint, + servers: servers, + opts: [local_settings: local_settings] + } + + Logger.debug("[build_server_opts] Creating handler_options") + + [ + port: port, + transport_module: transport_module(opts), + transport_options: transport_opts, + handler_module: Handler, + handler_options: handler_options, + num_acceptors: num_acceptors, + num_connections: num_connections + ] + end + + defp maybe_add_ip(transport_opts, adapter_opts) do + case Keyword.get(adapter_opts, :ip) do + nil -> transport_opts + ip -> 
Keyword.put(transport_opts, :ip, ip) + end + end + + defp maybe_add_ssl(transport_opts, nil), do: transport_opts + + defp maybe_add_ssl(transport_opts, cred_opts) do + transport_opts ++ cred_opts.ssl + end + + defp transport_module(opts) do + if cred_opts(opts) do + ThousandIsland.Transports.SSL + else + ThousandIsland.Transports.TCP + end + end + + defp cred_opts(opts) do + Kernel.get_in(opts, [:cred]) + end + + defp servers_name(nil, servers) do + servers |> Map.values() |> Enum.map(fn s -> inspect(s) end) |> Enum.join(",") + end + + defp servers_name(endpoint, _) do + inspect(endpoint) + end +end diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/README.md b/grpc_server/lib/grpc/server/adapters/thousand_island/README.md new file mode 100644 index 000000000..9368322d9 --- /dev/null +++ b/grpc_server/lib/grpc/server/adapters/thousand_island/README.md @@ -0,0 +1,236 @@ +# ThousandIsland Adapter + +Pure Elixir HTTP/2 gRPC server adapter using ThousandIsland. + +## Overview + +This adapter provides a pure Elixir implementation of HTTP/2 for gRPC, built on top of ThousandIsland TCP server. Unlike the Cowboy adapter, this implementation has no native dependencies. 
+ +## Usage + +### Basic Setup + +In your application supervisor: + +```elixir +defmodule MyApp.Application do + use Application + + def start(_type, _args) do + children = [ + # Your gRPC endpoint using ThousandIsland adapter + {GRPC.Server.Supervisor, + endpoint: MyApp.Endpoint, + port: 50051, + start_server: true, + adapter: GRPC.Server.Adapters.ThousandIsland, + adapter_opts: [ + num_acceptors: 10, + max_connections: 1000 + ]} + ] + + opts = [strategy: :one_for_one, name: MyApp.Supervisor] + Supervisor.start_link(children, opts) + end +end +``` + +### Defining Your Endpoint + +```elixir +defmodule MyApp.Endpoint do + use GRPC.Endpoint + + intercept GRPC.Server.Interceptors.Logger + run MyApp.Greeter.Server + run MyApp.OtherService.Server +end +``` + +### Defining Your Service + +```elixir +defmodule MyApp.Greeter.Service do + use GRPC.Service, name: "helloworld.Greeter" + + rpc :SayHello, Helloworld.HelloRequest, Helloworld.HelloReply +end + +defmodule MyApp.Greeter.Server do + use GRPC.Server, service: MyApp.Greeter.Service + + @spec say_hello(Helloworld.HelloRequest.t(), GRPC.Server.Stream.t()) :: + Helloworld.HelloReply.t() + def say_hello(request, _stream) do + Helloworld.HelloReply.new(message: "Hello #{request.name}") + end +end +``` + +## Configuration Options + +### Adapter Options + +- `:num_acceptors` - Number of acceptor processes (default: 100) +- `:max_connections` - Maximum concurrent connections (default: 16_384) +- `:transport_options` - Additional transport options passed to ThousandIsland + +### TLS Configuration + +For production use, configure TLS: + +```elixir +{GRPC.Server.Supervisor, + endpoint: MyApp.Endpoint, + port: 50051, + start_server: true, + adapter: GRPC.Server.Adapters.ThousandIsland, + adapter_opts: [ + transport_module: ThousandIsland.Transports.SSL, + transport_options: [ + certfile: "/path/to/cert.pem", + keyfile: "/path/to/key.pem", + alpn_preferred_protocols: ["h2"] + ] + ]} +``` + +## Features + +### Supported + +- ✅ 
HTTP/2 with HPACK header compression +- ✅ All 4 gRPC streaming types: + - Unary (single request → single response) + - Client streaming (stream → single response) + - Server streaming (single → stream) + - Bidirectional streaming (stream → stream) +- ✅ Flow control (connection and stream level) +- ✅ Multiple concurrent streams per connection +- ✅ gRPC error handling with status codes +- ✅ Content negotiation (protobuf, compression) +- ✅ Pure Elixir implementation (no NIFs) + +### Not Yet Supported + +- ⚠️ HTTP/2 Server Push +- ⚠️ SETTINGS frame priority handling +- ⚠️ HTTP/2 PRIORITY frames + +## Architecture + +``` +GRPC.Server.Supervisor + └── ThousandIsland (TCP acceptor pool) + └── Connection processes (one per client) + ├── Handler (HTTP/2 frame handling) + ├── Connection (state + dispatch) + ├── StreamRegistry (stream lifecycle) + └── Dispatcher (gRPC service routing) +``` + +Each client connection is handled by a separate process that: + +1. Accepts HTTP/2 connection preface +2. Manages HTTP/2 settings and flow control +3. Routes incoming gRPC requests to service implementations +4. Handles streaming in both directions +5. Encodes responses with proper framing and headers + +## Testing + +### Unit Tests + +Unit tests for the adapter are in `test/grpc/server/adapters/thousand_island_test.exs`. + +### Integration Tests + +Integration tests belong in the **grpc_client** package, not in grpc_server. This follows the architecture where: + +- `grpc_server` contains server implementation and unit tests +- `grpc_client` contains client implementation and integration tests (client ↔ server) + +To test the ThousandIsland adapter end-to-end: + +1. Start a test server using `GRPC.Server.Supervisor` with the ThousandIsland adapter +2. Use `GRPC.Stub` to create a client connection +3. 
Make RPC calls and verify responses + +Example integration test (in grpc_client): + +```elixir +# In grpc_client/test/integration/thousand_island_test.exs + +setup_all do + # Start server with ThousandIsland adapter + {:ok, _pid, port} = GRPC.Server.start( + MyTestEndpoint, + port: 0, + adapter: GRPC.Server.Adapters.ThousandIsland + ) + + # Connect client + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + {:ok, channel: channel} +end + +test "unary RPC works", %{channel: channel} do + request = Helloworld.HelloRequest.new(name: "World") + {:ok, reply} = channel |> Helloworld.Greeter.Stub.say_hello(request) + + assert reply.message == "Hello World" +end +``` + +## Performance Considerations + +- Each connection runs in its own process +- Connection processes are supervised by ThousandIsland +- Streams are tracked in ETS for fast lookup +- Flow control prevents memory exhaustion +- HPACK compression reduces header overhead + +For high-throughput scenarios, tune: + +- `:num_acceptors` - More acceptors for connection bursts +- `:max_connections` - Total concurrent connection limit +- Initial flow control window sizes (in Settings) + +## Comparison with Cowboy Adapter + +| Feature | ThousandIsland | Cowboy | +|---------|---------------|--------| +| Implementation | Pure Elixir | NIF (Ranch) | +| HTTP/2 | Custom | Cowlib | +| Dependencies | Minimal | Ranch, Cowlib | +| Performance | Good | Excellent | +| Debugging | Easier | Harder | +| Production Ready | Testing | Yes | + +## Development Status + +**Current Status**: Feature-complete, testing phase + +The adapter implements all core gRPC functionality. Integration testing with real clients is needed before production use. 
+

## Contributing

The adapter code is in `lib/grpc/server/adapters/thousand_island/`:

- `adapter.ex` - Main adapter interface
- `handler.ex` - HTTP/2 frame handling
- `connection.ex` - Connection state and dispatch
- `dispatcher.ex` - gRPC service routing
- `stream_state.ex` - Individual stream lifecycle
- `stream_registry.ex` - Multi-stream coordination
- `stream_collector.ex` - Streaming response collection
- `http2/` - HTTP/2 protocol implementation

When contributing:

1. Add unit tests for new functionality
2. Follow existing code style
3. Update this README for user-visible changes
4. Add integration tests in grpc_client package diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex new file mode 100644 index 000000000..008c78480 --- /dev/null +++ b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex @@ -0,0 +1,294 @@ +defmodule GRPC.Server.Adapters.ThousandIsland.Handler do + @moduledoc """ + ThousandIsland handler for gRPC requests. + + Implements `ThousandIsland.Handler` to serve gRPC over HTTP/2.
+ """ + use ThousandIsland.Handler + + alias GRPC.Server.HTTP2.Connection + alias GRPC.HTTP2.Frame + alias GRPC.HTTP2.Errors + require Logger + + # HTTP/2 connection preface per RFC9113§3.4 + @connection_preface "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + # Inline hot path functions + @compile {:inline, handle_data: 3, handle_preface: 3, handle_frames_loop: 5} + + @impl ThousandIsland.Handler + def handle_connection(socket, handler_options) do + Logger.debug("New HTTP/2 connection established - socket: #{inspect(socket)}") + + # Initialize ETS cache for codecs/compressors lookup + GRPC.Server.Cache.init() + + # Support both keyword list and map formats + {servers_list, endpoint, opts} = + if is_map(handler_options) do + {Map.get(handler_options, :servers, []), Map.get(handler_options, :endpoint), + Map.get(handler_options, :opts, [])} + else + {Keyword.get(handler_options, :servers, []), Keyword.get(handler_options, :endpoint), + Keyword.get(handler_options, :opts, [])} + end + + # Convert servers list/map to standardized map format + servers = + cond do + is_map(servers_list) and not is_struct(servers_list) -> servers_list + is_list(servers_list) -> GRPC.Server.servers_to_map(servers_list) + true -> %{} + end + + Logger.debug("[handle_connection] servers: #{inspect(servers)}") + Logger.debug("[handle_connection] endpoint: #{inspect(endpoint)}") + + new_state = %{ + endpoint: endpoint, + servers: servers, + opts: opts, + connection: nil, + buffer: <<>>, + preface_received: false, + # Map of stream_id => headers + accumulated_headers: %{} + } + + {:continue, new_state} + end + + @impl ThousandIsland.Handler + def handle_data(data, socket, %{preface_received: false, buffer: buffer} = state) do + new_buffer = buffer <> data + handle_preface(new_buffer, socket, state) + end + + def handle_data(data, socket, %{buffer: buffer} = state) do + new_buffer = buffer <> data + handle_frames(new_buffer, socket, state) + end + + @impl ThousandIsland.Handler + def handle_close(_socket, 
state) do + Logger.debug("Connection closed") + {:close, state} + end + + @impl ThousandIsland.Handler + def handle_error(reason, _socket, state) do + Logger.error("Connection error: #{inspect(reason)}") + {:close, state} + end + + # Streaming message handlers (GenServer callbacks) + # ThousandIsland passes state as {socket, user_state} + + # Accumulate headers without sending + def handle_info({:grpc_accumulate_headers, stream_id, headers}, {socket, state}) do + current_headers = Map.get(state.accumulated_headers, stream_id, %{}) + updated_headers = Map.merge(current_headers, headers) + new_accumulated = Map.put(state.accumulated_headers, stream_id, updated_headers) + {:noreply, {socket, %{state | accumulated_headers: new_accumulated}}} + end + + def handle_info({:grpc_send_headers, stream_id, headers}, {socket, state}) do + # Send headers frame for streaming + Logger.debug("[Streaming] Sending headers for stream #{stream_id}") + Connection.send_headers(socket, stream_id, headers, state.connection) + {:noreply, {socket, state}} + end + + def handle_info({:grpc_send_data, stream_id, data}, {socket, state}) do + # Check if we need to send accumulated headers first + accumulated = Map.get(state.accumulated_headers, stream_id, %{}) + + new_state = + if map_size(accumulated) > 0 do + Connection.send_headers(socket, stream_id, accumulated, state.connection) + # Clear accumulated headers for this stream + %{state | accumulated_headers: Map.delete(state.accumulated_headers, stream_id)} + else + state + end + + # Send data frame for streaming + Connection.send_data(socket, stream_id, data, false, new_state.connection) + {:noreply, {socket, new_state}} + end + + def handle_info({:grpc_send_trailers, stream_id, trailers}, {socket, state}) do + # Check if we need to send accumulated headers first (for empty streams) + accumulated = Map.get(state.accumulated_headers, stream_id, %{}) + + new_state = + if map_size(accumulated) > 0 do + Connection.send_headers(socket, stream_id, 
accumulated, state.connection) + # Clear accumulated headers for this stream + %{state | accumulated_headers: Map.delete(state.accumulated_headers, stream_id)} + else + state + end + + # Send trailers (headers with END_STREAM) for streaming + Connection.send_trailers(socket, stream_id, trailers, new_state.connection) + {:noreply, {socket, new_state}} + end + + def handle_info({:update_stream_state, stream_id, updated_stream_state}, {socket, state}) do + # Update the stream_state in the connection for bidi streaming + Logger.info( + "[Handler] Updating stream_state for stream #{stream_id}, bidi_pid=#{inspect(updated_stream_state.bidi_stream_pid)}" + ) + + connection = state.connection + + updated_connection = %{ + connection + | streams: Map.put(connection.streams, stream_id, updated_stream_state) + } + + {:noreply, {socket, %{state | connection: updated_connection}}} + end + + def handle_info(_msg, {socket, state}) do + {:noreply, {socket, state}} + end + + defp handle_preface(buffer, _socket, state) when byte_size(buffer) < 24 do + # Wait for more data (preface is 24 bytes) + {:continue, %{state | buffer: buffer}} + end + + defp handle_preface(<<@connection_preface, remaining::binary>>, socket, state) do + # Valid preface, initialize connection + try do + # Pass handler PID for streaming support + opts = Keyword.put(state.opts, :handler_pid, self()) + connection = Connection.init(socket, state.endpoint, state.servers, opts) + new_state = %{state | connection: connection, preface_received: true, buffer: <<>>} + + if byte_size(remaining) > 0 do + handle_frames(remaining, socket, new_state) + else + {:continue, new_state} + end + rescue + e -> + Logger.error( + "Error initializing connection: #{inspect(e)}\n#{Exception.format_stacktrace()}" + ) + + {:close, state} + end + end + + defp handle_preface(_buffer, _socket, state) do + # Invalid preface + Logger.error("Invalid HTTP/2 preface") + {:close, state} + end + + defp handle_frames(buffer, socket, state) do + 
handle_frames_loop( + buffer, + socket, + state.connection, + state.connection.remote_settings.max_frame_size, + state + ) + end + + # Optimized tail-recursive frame processing + defp handle_frames_loop(buffer, socket, connection, max_frame_size, original_state) do + case Frame.deserialize(buffer, max_frame_size) do + {{:ok, frame}, rest} -> + try do + new_connection = Connection.handle_frame(frame, socket, connection) + + if byte_size(rest) > 0 do + # Continue processing with updated connection + handle_frames_loop(rest, socket, new_connection, max_frame_size, original_state) + else + # All frames processed, return updated state + {:continue, %{original_state | connection: new_connection, buffer: <<>>}} + end + rescue + e in Errors.ConnectionError -> + Logger.error("Connection error: #{e.message}") + {:close, original_state} + + e in Errors.StreamError -> + Logger.error("Stream error: #{e.message}") + {:continue, %{original_state | connection: connection, buffer: rest}} + end + + {{:more, _partial}, <<>>} -> + # Need more data to parse frame + {:continue, %{original_state | connection: connection, buffer: buffer}} + + {{:error, error_code, reason}, _rest} -> + Logger.error("Frame deserialization error: #{reason} (code: #{error_code})") + {:close, original_state} + + nil -> + # No more frames to parse + {:continue, %{original_state | connection: connection, buffer: <<>>}} + end + end + + # API functions called by the adapter + + def read_full_body(pid) do + GenServer.call(pid, :read_full_body) + end + + def read_body(pid) do + GenServer.call(pid, :read_body) + end + + def send_data(pid, data, opts) do + GenServer.cast(pid, {:send_data, data, opts}) + end + + def send_headers(pid, headers) do + GenServer.cast(pid, {:send_headers, headers}) + end + + def set_headers(pid, headers) do + GenServer.cast(pid, {:set_headers, headers}) + end + + def set_trailers(pid, trailers) do + GenServer.cast(pid, {:set_trailers, trailers}) + end + + def send_trailers(pid, trailers) do 
+ GenServer.cast(pid, {:send_trailers, trailers}) + end + + def get_headers(pid) do + GenServer.call(pid, :get_headers) + end + + def get_peer(pid) do + GenServer.call(pid, :get_peer) + end + + def get_cert(pid) do + GenServer.call(pid, :get_cert) + end + + def get_query_string(pid) do + GenServer.call(pid, :get_query_string) + end + + def get_bindings(pid) do + GenServer.call(pid, :get_bindings) + end + + def set_compressor(pid, compressor) do + GenServer.cast(pid, {:set_compressor, compressor}) + end +end diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/http2.back b/grpc_server/lib/grpc/server/adapters/thousand_island/http2.back new file mode 100644 index 000000000..5decd6535 --- /dev/null +++ b/grpc_server/lib/grpc/server/adapters/thousand_island/http2.back @@ -0,0 +1,275 @@ +defmodule GRPC.Server.Adapters.ThousandIsland.HTTP2 do + @moduledoc """ + HTTP/2 frame handling for gRPC over ThousandIsland. + + This module provides HTTP/2 protocol support using the chatterbox library, + handling frame parsing, stream management, and HPACK compression. + """ + + require Logger + + @type stream_id :: non_neg_integer() + @type headers :: [{binary(), binary()}] + @type frame :: term() + + @doc """ + Initialize HTTP/2 connection state. + """ + def init_connection do + # Initialize HTTP/2 connection settings + %{ + streams: %{}, + next_stream_id: 1, + settings: default_settings(), + header_table: :hpack.new_context(), + last_stream_id: 0 + } + end + + @doc """ + Process incoming HTTP/2 frame data. + """ + def handle_data(data, state) do + case parse_frame(data, state) do + {:ok, frames, rest, new_state} -> + {:ok, frames, rest, new_state} + + {:error, reason} -> + {:error, reason} + + {:more, new_state} -> + {:more, new_state} + end + end + + @doc """ + Parse HTTP/2 frames from binary data. 
+ """ + defp parse_frame(<<>>, state), do: {:more, state} + + defp parse_frame(data, state) when byte_size(data) < 9 do + # Need at least frame header (9 bytes) + {:more, Map.put(state, :buffer, data)} + end + + defp parse_frame(data, state) do + case :h2_frame.read(data) do + {:ok, frame, rest} -> + {frames, new_state} = process_frame(frame, state) + + if byte_size(rest) > 0 do + case parse_frame(rest, new_state) do + {:ok, more_frames, final_rest, final_state} -> + {:ok, frames ++ more_frames, final_rest, final_state} + + other -> + other + end + else + {:ok, frames, rest, new_state} + end + + {:error, reason} -> + {:error, reason} + + :incomplete -> + {:more, Map.put(state, :buffer, data)} + end + end + + @doc """ + Process a single HTTP/2 frame. + """ + defp process_frame({:headers, stream_id, headers, flags}, state) do + Logger.debug("Received HEADERS frame for stream #{stream_id}") + + # Decode HPACK-compressed headers + {:ok, decoded_headers, new_context} = + :hpack.decode(headers, state.header_table) + + new_state = %{ + state + | header_table: new_context, + last_stream_id: max(state.last_stream_id, stream_id) + } + + # Check if END_HEADERS flag is set + end_headers? = :h2_frame.is_flag_set(:end_headers, flags) + end_stream? = :h2_frame.is_flag_set(:end_stream, flags) + + stream_info = %{ + stream_id: stream_id, + headers: decoded_headers, + end_headers: end_headers?, + end_stream: end_stream?, + state: if(end_stream?, do: :half_closed_remote, else: :open) + } + + new_state = put_in(new_state, [:streams, stream_id], stream_info) + + {[{:headers, stream_id, decoded_headers, flags}], new_state} + end + + defp process_frame({:data, stream_id, data, flags}, state) do + Logger.debug("Received DATA frame for stream #{stream_id}: #{byte_size(data)} bytes") + + end_stream? = :h2_frame.is_flag_set(:end_stream, flags) + + if end_stream? 
do + # Update stream state + new_state = + update_in(state, [:streams, stream_id, :state], fn _ -> + :half_closed_remote + end) + + {[{:data, stream_id, data, true}], new_state} + else + {[{:data, stream_id, data, false}], state} + end + end + + defp process_frame({:settings, _stream_id, settings}, state) do + Logger.debug("Received SETTINGS frame: #{inspect(settings)}") + + # Merge received settings with our settings + new_settings = Map.merge(state.settings, Enum.into(settings, %{})) + new_state = %{state | settings: new_settings} + + # Must send SETTINGS ACK + ack_frame = encode_settings_ack() + + {[{:settings, settings}, {:send, ack_frame}], new_state} + end + + defp process_frame({:window_update, stream_id, increment}, state) do + Logger.debug("Received WINDOW_UPDATE for stream #{stream_id}: +#{increment}") + + # Update flow control window + # TODO: Implement proper flow control + {[{:window_update, stream_id, increment}], state} + end + + defp process_frame({:rst_stream, stream_id, error_code}, state) do + Logger.debug("Received RST_STREAM for stream #{stream_id}: #{error_code}") + + # Remove stream from state + new_state = + update_in(state, [:streams], fn streams -> + Map.delete(streams, stream_id) + end) + + {[{:rst_stream, stream_id, error_code}], new_state} + end + + defp process_frame({:ping, _stream_id, opaque_data}, state) do + Logger.debug("Received PING frame") + + # Must respond with PING ACK + pong_frame = encode_ping_ack(opaque_data) + + {[{:ping, opaque_data}, {:send, pong_frame}], state} + end + + defp process_frame({:goaway, _stream_id, last_stream_id, error_code, debug_data}, state) do + Logger.info("Received GOAWAY: last_stream=#{last_stream_id}, error=#{error_code}") + + {[{:goaway, last_stream_id, error_code, debug_data}], state} + end + + defp process_frame(frame, state) do + Logger.warning("Unhandled frame type: #{inspect(frame)}") + {[], state} + end + + @doc """ + Encode headers frame. 
+ """ + def encode_headers(stream_id, headers, end_stream?, context) do + {:ok, encoded_headers, new_context} = :hpack.encode(headers, context) + + flags = + [:end_headers] ++ + if end_stream?, do: [:end_stream], else: [] + + frame = :h2_frame.headers(stream_id, flags, encoded_headers) + {:ok, :h2_frame.to_binary(frame), new_context} + end + + @doc """ + Encode data frame. + """ + def encode_data(stream_id, data, end_stream?) do + flags = if end_stream?, do: [:end_stream], else: [] + frame = :h2_frame.data(stream_id, flags, data) + {:ok, :h2_frame.to_binary(frame)} + end + + @doc """ + Encode settings frame. + """ + def encode_settings(settings) do + frame = :h2_frame.settings(0, [], settings) + :h2_frame.to_binary(frame) + end + + @doc """ + Encode settings ACK frame. + """ + def encode_settings_ack do + frame = :h2_frame.settings(0, [:ack], []) + :h2_frame.to_binary(frame) + end + + @doc """ + Encode ping ACK frame. + """ + def encode_ping_ack(opaque_data) do + frame = :h2_frame.ping(0, [:ack], opaque_data) + :h2_frame.to_binary(frame) + end + + @doc """ + Encode RST_STREAM frame. + """ + def encode_rst_stream(stream_id, error_code) do + frame = :h2_frame.rst_stream(stream_id, error_code) + :h2_frame.to_binary(frame) + end + + @doc """ + Encode GOAWAY frame. + """ + def encode_goaway(last_stream_id, error_code, debug_data \\ <<>>) do + frame = :h2_frame.goaway(0, last_stream_id, error_code, debug_data) + :h2_frame.to_binary(frame) + end + + @doc """ + Default HTTP/2 settings. + """ + def default_settings do + %{ + header_table_size: 4096, + enable_push: 0, + max_concurrent_streams: 100, + initial_window_size: 65535, + max_frame_size: 16384, + max_header_list_size: :infinity + } + end + + @doc """ + Validate HTTP/2 connection preface. 
+ """ + def validate_preface(<<"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n", rest::binary>>) do + {:ok, rest} + end + + def validate_preface(data) when byte_size(data) < 24 do + {:more, data} + end + + def validate_preface(_data) do + {:error, :invalid_preface} + end +end diff --git a/grpc_server/lib/grpc/server/bidi_stream.ex b/grpc_server/lib/grpc/server/bidi_stream.ex new file mode 100644 index 000000000..f5fc6cb3f --- /dev/null +++ b/grpc_server/lib/grpc/server/bidi_stream.ex @@ -0,0 +1,168 @@ +defmodule GRPC.Server.BidiStream do + @moduledoc """ + Manages bidirectional streaming request messages using a supervised Task. + + This module stores incoming request messages for a bidi streaming RPC + and provides them as a lazy enumerable to the handler. It blocks when + no messages are available and wakes up when new messages arrive. + + Uses a Task instead of GenServer for lighter weight and automatic supervision. + """ + require Logger + + @doc """ + Starts a supervised task that manages messages for a bidi stream. + Returns {:ok, pid}. + """ + def start_link(stream_id, initial_messages \\ []) do + Task.Supervisor.start_child(GRPC.Server.StreamTaskSupervisor, fn -> + loop(%{ + stream_id: stream_id, + messages: :queue.from_list(initial_messages), + waiting_caller: nil, + stream_finished: false + }) + end) + end + + @doc """ + Adds decoded messages to the stream. + """ + def put_messages(pid, messages) when is_list(messages) do + send(pid, {:put_messages, messages}) + :ok + end + + @doc """ + Marks the stream as finished (client sent END_STREAM). + """ + def finish(pid) do + send(pid, :finish) + :ok + end + + @doc """ + Cancels the stream (client sent RST_STREAM). + """ + def cancel(pid) do + send(pid, :cancel) + :ok + end + + @doc """ + Creates a lazy enumerable that reads messages from this stream. + Blocks when no messages are available. 
+ """ + def to_enum(pid) do + Stream.resource( + fn -> pid end, + fn pid -> + case get_next_message(pid) do + {:ok, message} -> {[message], pid} + :done -> {:halt, pid} + end + end, + fn _pid -> :ok end + ) + end + + ## Private + + defp get_next_message(pid) do + ref = make_ref() + send(pid, {:next_message, self(), ref}) + + receive do + {^ref, response} -> response + end + end + + defp loop(state) do + receive do + {:next_message, caller_pid, ref} -> + Logger.info( + "[BidiStream #{state.stream_id}] Received :next_message, queue_size=#{:queue.len(state.messages)}, finished=#{state.stream_finished}" + ) + + case :queue.out(state.messages) do + {{:value, message}, new_queue} -> + # Return message immediately + Logger.debug("[BidiStream #{state.stream_id}] Returning message from queue") + send(caller_pid, {ref, {:ok, message}}) + loop(%{state | messages: new_queue}) + + {:empty, _} -> + if state.stream_finished do + # No more messages and stream is done + Logger.debug("[BidiStream #{state.stream_id}] Stream finished, no more messages") + send(caller_pid, {ref, :done}) + # Exit the task - stream is complete + :ok + else + # No messages yet - store caller and wait + Logger.debug("[BidiStream #{state.stream_id}] Queue empty, blocking caller") + loop(%{state | waiting_caller: {caller_pid, ref}}) + end + end + + {:put_messages, new_messages} -> + Logger.info( + "[BidiStream #{state.stream_id}] Received #{length(new_messages)} new messages" + ) + + # Add messages to queue + new_queue = + Enum.reduce(new_messages, state.messages, fn msg, queue -> + :queue.in(msg, queue) + end) + + # If someone is waiting, reply with first message + Logger.debug( + "[BidiStream #{state.stream_id}] After adding messages, queue_size=#{:queue.len(new_queue)}, has_waiting_caller=#{not is_nil(state.waiting_caller)}" + ) + + if state.waiting_caller do + {caller_pid, ref} = state.waiting_caller + + case :queue.out(new_queue) do + {{:value, message}, final_queue} -> + send(caller_pid, {ref, {:ok, 
message}}) + loop(%{state | messages: final_queue, waiting_caller: nil}) + + {:empty, _} -> + # Shouldn't happen but handle gracefully + loop(%{state | messages: new_queue}) + end + else + loop(%{state | messages: new_queue}) + end + + :finish -> + Logger.info( + "[BidiStream #{state.stream_id}] Received :finish, has_waiting_caller=#{not is_nil(state.waiting_caller)}" + ) + + # Mark stream as finished + if state.waiting_caller do + {caller_pid, ref} = state.waiting_caller + # Reply to waiting caller that stream is done + send(caller_pid, {ref, :done}) + # Exit the task - stream is complete + :ok + else + loop(%{state | stream_finished: true}) + end + + :cancel -> + Logger.info("[BidiStream #{state.stream_id}] Received :cancel (RST_STREAM from client)") + # If someone is waiting, reply that stream is done + if state.waiting_caller do + {caller_pid, ref} = state.waiting_caller + send(caller_pid, {ref, :done}) + end + + # Exit the task - stream was cancelled + :ok + end + end +end diff --git a/grpc_server/lib/grpc/server/cache.ex b/grpc_server/lib/grpc/server/cache.ex new file mode 100644 index 000000000..26db6991b --- /dev/null +++ b/grpc_server/lib/grpc/server/cache.ex @@ -0,0 +1,90 @@ +defmodule GRPC.Server.Cache do + @moduledoc """ + ETS-based cache for frequently accessed server metadata. + Improves performance by avoiding repeated Enum.find operations. + """ + + @table_name :grpc_server_cache + + def init do + case :ets.whereis(@table_name) do + :undefined -> + :ets.new(@table_name, [:named_table, :public, :set, {:read_concurrency, true}]) + + _tid -> + :ok + end + end + + @doc """ + Find codec by name for a given server. + Uses ETS cache to avoid repeated Enum.find calls. 
+ """ + def find_codec(server, subtype) do + cache_key = {:codec, server, subtype} + + case :ets.lookup(@table_name, cache_key) do + [{^cache_key, codec}] -> + codec + + [] -> + codec = Enum.find(server.__meta__(:codecs), nil, fn c -> c.name() == subtype end) + :ets.insert(@table_name, {cache_key, codec}) + codec + end + end + + @doc """ + Find compressor by name for a given server. + Uses ETS cache to avoid repeated Enum.find calls. + """ + def find_compressor(server, encoding) do + cache_key = {:compressor, server, encoding} + + case :ets.lookup(@table_name, cache_key) do + [{^cache_key, compressor}] -> + compressor + + [] -> + compressor = + Enum.find(server.__meta__(:compressors), nil, fn c -> c.name() == encoding end) + + :ets.insert(@table_name, {cache_key, compressor}) + compressor + end + end + + @doc """ + Find RPC definition by method name for a given server. + Uses ETS cache to avoid repeated Enum.find calls. + """ + def find_rpc(server, method_name) do + cache_key = {:rpc, server, method_name} + + case :ets.lookup(@table_name, cache_key) do + [{^cache_key, rpc}] -> + rpc + + [] -> + rpc_calls = server.__meta__(:service).__rpc_calls__() + + rpc = + Enum.find(rpc_calls, nil, fn {name, _, _, _} -> + Atom.to_string(name) == method_name + end) + + :ets.insert(@table_name, {cache_key, rpc}) + rpc + end + end + + @doc """ + Clear the cache. Useful for testing or when server configuration changes. + """ + def clear do + case :ets.whereis(@table_name) do + :undefined -> :ok + _tid -> :ets.delete_all_objects(@table_name) + end + end +end diff --git a/grpc_server/lib/grpc/server/http2/connection.ex b/grpc_server/lib/grpc/server/http2/connection.ex new file mode 100644 index 000000000..62affeb56 --- /dev/null +++ b/grpc_server/lib/grpc/server/http2/connection.ex @@ -0,0 +1,982 @@ +defmodule GRPC.Server.HTTP2.Connection do + @moduledoc """ + Represents the state of an HTTP/2 connection for gRPC. 
+ """ + require Logger + + alias GRPC.HTTP2.Frame + alias GRPC.HTTP2.Settings + alias GRPC.HTTP2.Errors + alias GRPC.Server.HTTP2.StreamState + + # Inline hot path functions for performance + @compile {:inline, + extract_messages: 2, send_frame: 3, handle_headers_frame: 3, send_grpc_trailers: 4} + + defstruct local_settings: %Settings{}, + remote_settings: %Settings{}, + fragment_frame: nil, + send_hpack_state: HPAX.new(4096), + recv_hpack_state: HPAX.new(4096), + send_window_size: 65_535, + recv_window_size: 65_535, + streams: %{}, + # Map of stream_id => stream_state + next_stream_id: 2, + # Client streams are odd, server push is even + endpoint: nil, + servers: %{}, + socket: nil, + handler_pid: nil + + @typedoc "Encapsulates the state of an HTTP/2 connection" + @type t :: %__MODULE__{ + local_settings: Settings.t(), + remote_settings: Settings.t(), + fragment_frame: Frame.Headers.t() | nil, + send_hpack_state: term(), + recv_hpack_state: term(), + send_window_size: non_neg_integer(), + recv_window_size: non_neg_integer(), + streams: %{non_neg_integer() => map()}, + next_stream_id: non_neg_integer(), + endpoint: atom(), + servers: %{String.t() => [module()]}, + socket: ThousandIsland.Socket.t() | nil + } + + @doc """ + Initializes a new HTTP/2 connection. 
+ """ + @spec init(ThousandIsland.Socket.t(), atom(), %{String.t() => [module()]}, keyword()) :: t() + def init(socket, endpoint, servers, opts \\ []) do + handler_pid = Keyword.get(opts, :handler_pid) + Logger.debug("[Connection.init] handler_pid=#{inspect(handler_pid)}") + + connection = %__MODULE__{ + local_settings: struct!(Settings, Keyword.get(opts, :local_settings, [])), + endpoint: endpoint, + servers: servers, + socket: socket, + handler_pid: handler_pid + } + + # Send initial SETTINGS frame per RFC9113§3.4 + settings_frame = %Frame.Settings{ + ack: false, + settings: [ + header_table_size: connection.local_settings.header_table_size, + max_concurrent_streams: connection.local_settings.max_concurrent_streams, + initial_window_size: connection.local_settings.initial_window_size, + max_frame_size: connection.local_settings.max_frame_size, + max_header_list_size: connection.local_settings.max_header_list_size + ] + } + + send_frame(settings_frame, socket, connection) + + connection + end + + @doc """ + Send headers for streaming response. + """ + def send_headers(socket, stream_id, headers, connection) do + # Encode headers using HPAX - convert map to list of tuples + Logger.debug("[send_headers] stream_id=#{stream_id}, headers=#{inspect(headers)}") + headers_list = if is_map(headers), do: Map.to_list(headers), else: headers + {header_block, _new_hpack} = HPAX.encode(:no_store, headers_list, connection.send_hpack_state) + + # Send HEADERS frame without END_STREAM + frame = %Frame.Headers{ + stream_id: stream_id, + fragment: header_block, + end_stream: false, + end_headers: true + } + + send_frame(frame, socket, connection) + end + + @doc """ + Send data frame for streaming response. 
+ """ + def send_data(socket, stream_id, data, end_stream, connection) do + # Send DATA frame + frame = %Frame.Data{ + stream_id: stream_id, + data: data, + end_stream: end_stream + } + + send_frame(frame, socket, connection) + end + + @doc """ + Send trailers (headers with END_STREAM) for streaming response. + """ + def send_trailers(socket, stream_id, trailers, connection) do + # Encode custom metadata (handles -bin suffix base64 encoding) + # Note: encode_metadata filters out reserved headers like grpc-status + encoded_custom = GRPC.Transport.HTTP2.encode_metadata(trailers) + + # Re-add reserved headers (grpc-status, etc) that were filtered out + encoded_trailers = + Map.merge( + Map.take(trailers, ["grpc-status", "grpc-message"]), + encoded_custom + ) + + # Convert map to list + trailer_list = Map.to_list(encoded_trailers) + + {trailer_block, _new_hpack} = + HPAX.encode(:no_store, trailer_list, connection.send_hpack_state) + + # Send HEADERS frame with END_STREAM + frame = %Frame.Headers{ + stream_id: stream_id, + fragment: trailer_block, + end_stream: true, + end_headers: true + } + + send_frame(frame, socket, connection) + end + + @doc """ + Set custom headers for a stream (used for custom_metadata test). + """ + def set_stream_custom_headers(connection, stream_id, headers) do + case Map.get(connection.streams, stream_id) do + nil -> + connection + + stream_state -> + updated_stream = %{ + stream_state + | custom_headers: Map.merge(stream_state.custom_headers, headers) + } + + %{connection | streams: Map.put(connection.streams, stream_id, updated_stream)} + end + end + + @doc """ + Set custom trailers for a stream (used for custom_metadata test). 
+ """ + def set_stream_custom_trailers(connection, stream_id, trailers) do + case Map.get(connection.streams, stream_id) do + nil -> + connection + + stream_state -> + updated_stream = %{ + stream_state + | custom_trailers: Map.merge(stream_state.custom_trailers, trailers) + } + + %{connection | streams: Map.put(connection.streams, stream_id, updated_stream)} + end + end + + @doc """ + Get the stream state for a specific stream ID. + Returns nil if stream doesn't exist. + """ + def get_stream(connection, stream_id) do + Map.get(connection.streams, stream_id) + end + + @doc """ + Handles an incoming HTTP/2 frame. + """ + @spec handle_frame(Frame.frame(), ThousandIsland.Socket.t(), t()) :: t() + def handle_frame(frame, socket, connection) do + do_handle_frame(frame, socket, connection) + end + + defp do_handle_frame( + %Frame.Continuation{end_headers: true, stream_id: stream_id} = frame, + socket, + %__MODULE__{fragment_frame: %Frame.Headers{stream_id: stream_id}} = connection + ) do + header_block = connection.fragment_frame.fragment <> frame.fragment + header_frame = %{connection.fragment_frame | end_headers: true, fragment: header_block} + do_handle_frame(header_frame, socket, %{connection | fragment_frame: nil}) + end + + defp do_handle_frame( + %Frame.Continuation{end_headers: false, stream_id: stream_id} = frame, + _socket, + %__MODULE__{fragment_frame: %Frame.Headers{stream_id: stream_id}} = connection + ) do + fragment = connection.fragment_frame.fragment <> frame.fragment + # TODO: Check max header list size + fragment_frame = %{connection.fragment_frame | fragment: fragment} + %{connection | fragment_frame: fragment_frame} + end + + defp do_handle_frame(_frame, _socket, %__MODULE__{fragment_frame: %Frame.Headers{}}) do + connection_error!("Expected CONTINUATION frame (RFC9113§6.10)") + end + + defp do_handle_frame(%Frame.Settings{ack: true}, _socket, connection) do + Logger.info("[Connection] Received SETTINGS ACK") + connection + end + + defp 
do_handle_frame(%Frame.Settings{ack: false} = frame, socket, connection) do
+    Logger.info("[Connection] Received SETTINGS, sending ACK")
+    %Frame.Settings{ack: true, settings: []} |> send_frame(socket, connection)
+
+    remote_settings = apply_settings(connection.remote_settings, frame.settings)
+
+    # Update HPACK table size if changed
+    send_hpack_state =
+      if remote_settings.header_table_size != connection.remote_settings.header_table_size do
+        HPAX.resize(connection.send_hpack_state, remote_settings.header_table_size)
+      else
+        connection.send_hpack_state
+      end
+
+    %{connection | remote_settings: remote_settings, send_hpack_state: send_hpack_state}
+  end
+
+  # PING (non-ACK): echo the payload back with the ACK flag set (RFC9113§6.7).
+  defp do_handle_frame(%Frame.Ping{ack: false, payload: data}, socket, connection) do
+    Logger.info("[Connection] Received PING, sending ACK")
+    %Frame.Ping{ack: true, payload: data} |> send_frame(socket, connection)
+    connection
+  end
+
+  defp do_handle_frame(%Frame.Ping{ack: true}, _socket, connection) do
+    # Ignore PING ACKs for now (we don't track sent PINGs yet)
+    connection
+  end
+
+  defp do_handle_frame(%Frame.Goaway{} = frame, _socket, connection) do
+    Logger.info(
+      "Received GOAWAY: last_stream_id=#{frame.last_stream_id}, error=#{frame.error_code}"
+    )
+
+    # TODO: Handle graceful shutdown
+    connection
+  end
+
+  # Connection-level WINDOW_UPDATE (stream 0): grow the send window, guarding overflow.
+  defp do_handle_frame(%Frame.WindowUpdate{stream_id: 0} = frame, _socket, connection) do
+    Logger.info("[Connection] WINDOW_UPDATE connection-level: +#{frame.size_increment}")
+    new_window = connection.send_window_size + frame.size_increment
+
+    if new_window > 2_147_483_647 do
+      connection_error!("Flow control window overflow (RFC9113§6.9.1)")
+    end
+
+    %{connection | send_window_size: new_window}
+  end
+
+  defp do_handle_frame(%Frame.Headers{} = frame, socket, connection) do
+    Logger.info(
+      "[RECV_HEADERS] stream=#{frame.stream_id}, end_headers=#{frame.end_headers}, end_stream=#{frame.end_stream}"
+    )
+
+    if frame.end_headers do
+      handle_headers_frame(frame, socket, connection)
+    else
+      # Start accumulating CONTINUATION frames
+      %{connection | fragment_frame: frame}
+    end
+  end
+
+  defp do_handle_frame(%Frame.Data{stream_id: stream_id} = frame, socket, connection) do
+    Logger.info(
+      "[RECV_DATA] stream=#{stream_id}, size=#{byte_size(frame.data)}, end_stream=#{frame.end_stream}, stream_exists=#{Map.has_key?(connection.streams, stream_id)}"
+    )
+
+    case Map.get(connection.streams, stream_id) do
+      nil ->
+        Logger.warning(
+          "[IGNORE_DATA] stream=#{stream_id} not found, size=#{byte_size(frame.data)} (stream already closed)"
+        )
+
+        connection
+
+      stream_state ->
+        # Send WINDOW_UPDATE to allow client to continue sending
+        data_size = byte_size(frame.data)
+
+        # Only send WINDOW_UPDATE if there's actual data (non-zero increment)
+        if data_size > 0 do
+          Logger.debug("[WINDOW_UPDATE] stream=#{stream_id}, size=#{data_size}")
+          # Send connection-level WINDOW_UPDATE
+          conn_window_update = %Frame.WindowUpdate{stream_id: 0, size_increment: data_size}
+          send_frame(conn_window_update, socket, connection)
+
+          # Send stream-level WINDOW_UPDATE
+          stream_window_update = %Frame.WindowUpdate{
+            stream_id: stream_id,
+            size_increment: data_size
+          }
+
+          send_frame(stream_window_update, socket, connection)
+        end
+
+        # Accumulate data in stream buffer
+        updated_stream = %{stream_state | data_buffer: stream_state.data_buffer <> frame.data}
+
+        # Mark if END_STREAM was received (stream half-closed remote)
+        updated_stream =
+          if frame.end_stream do
+            %{updated_stream | end_stream_received: true}
+          else
+            updated_stream
+          end
+
+        updated_connection = %{
+          connection
+          | streams: Map.put(connection.streams, stream_id, updated_stream)
+        }
+
+        # For bidirectional streaming, process when we have complete messages
+        # For other types, wait for END_STREAM
+        should_process =
+          if frame.end_stream do
+            true
+          else
+            # For bidi, check if we have complete messages AND start processing
+            updated_stream.is_bidi_streaming and has_complete_message?(updated_stream.data_buffer)
+          end
+
+        if should_process do
+          # Process the request (for bidi, this starts the handler on first message)
+          Logger.info(
+            "[Connection] Stream #{stream_id} processing (end_stream=#{frame.end_stream}, bidi=#{updated_stream.is_bidi_streaming})"
+          )
+
+          process_grpc_request(socket, updated_stream, updated_connection, frame.end_stream)
+        else
+          # More data coming (non-bidi case)
+          Logger.debug(
+            "[Connection] Stream #{stream_id} waiting for more data (buffer=#{byte_size(updated_stream.data_buffer)} bytes)"
+          )
+
+          updated_connection
+        end
+    end
+  end
+
+  defp do_handle_frame(
+         %Frame.RstStream{stream_id: stream_id, error_code: error},
+         _socket,
+         connection
+       ) do
+    stream_exists = Map.has_key?(connection.streams, stream_id)
+
+    Logger.info(
+      "[RECV_RST_STREAM] stream=#{stream_id}, error=#{error}, stream_exists=#{stream_exists}"
+    )
+
+    # Notify BidiStream (if exists) that stream was cancelled
+    case Process.get({:bidi_stream_pid, stream_id}) do
+      nil ->
+        Logger.debug("[Connection] No BidiStream found for stream #{stream_id}")
+        :ok
+
+      pid ->
+        Logger.info("[Connection] Cancelling BidiStream for stream #{stream_id}")
+        GRPC.Server.BidiStream.cancel(pid)
+    end
+
+    # Clean up process dictionary
+    Process.delete({:bidi_stream_pid, stream_id})
+    Process.delete({:bidi_stream_state, stream_id})
+    Process.delete({:grpc_custom_trailers, stream_id})
+
+    # Remove stream from streams map
+    streams = Map.delete(connection.streams, stream_id)
+    Logger.debug("[REMOVE_STREAM] stream=#{stream_id}, remaining_streams=#{map_size(streams)}")
+    %{connection | streams: streams}
+  end
+
+  defp do_handle_frame(%Frame.WindowUpdate{stream_id: stream_id} = frame, _socket, connection) do
+    Logger.info("[Connection] WINDOW_UPDATE stream=#{stream_id}: +#{frame.size_increment}")
+    # TODO: Update stream send window
+    connection
+  end
+
+  defp do_handle_frame(%Frame.Priority{}, _socket, connection) do
+    # gRPC doesn't use priority, ignore
+    connection
+  end
+
+  defp do_handle_frame(%Frame.PushPromise{}, _socket, _connection) do
+    # Server push not supported in gRPC
+    connection_error!("PUSH_PROMISE not supported (RFC9113§8.4)")
+  end
+
+  defp do_handle_frame(%Frame.Unknown{}, _socket, connection) do
+    # Ignore unknown frames per RFC9113§4.1
+    connection
+  end
+
+  # Decodes a complete HPACK header block, builds a StreamState from the
+  # pseudo-headers/metadata, and registers the new stream on the connection.
+  defp handle_headers_frame(frame, _socket, connection) do
+    Logger.info("[handle_headers_frame] Decoding HPACK for stream #{frame.stream_id}")
+
+    case HPAX.decode(frame.fragment, connection.recv_hpack_state) do
+      {:ok, headers, new_hpack_state} ->
+        Logger.info(
+          "[handle_headers_frame] Decoded headers for stream #{frame.stream_id}: #{inspect(headers)}"
+        )
+
+        # Create stream state from headers
+        stream_state =
+          StreamState.from_headers(
+            frame.stream_id,
+            headers,
+            connection.local_settings.initial_window_size
+          )
+
+        # Add handler_pid for streaming support
+        stream_state = %{stream_state | handler_pid: connection.handler_pid}
+
+        # Check if this is bidirectional streaming
+        # For bidi, we need to process messages as they arrive (not wait for END_STREAM)
+        is_bidi =
+          GRPC.Server.HTTP2.Dispatcher.is_bidi_streaming?(stream_state.path, connection.servers)
+
+        stream_state = %{stream_state | is_bidi_streaming: is_bidi}
+
+        if is_bidi do
+          Logger.info(
+            "[handle_headers_frame] Stream #{frame.stream_id} is bidirectional streaming"
+          )
+        end
+
+        # Store stream in connection
+        streams = Map.put(connection.streams, frame.stream_id, stream_state)
+
+        %{connection | recv_hpack_state: new_hpack_state, streams: streams}
+
+      {:error, reason} ->
+        connection_error!("HPACK decode error: #{inspect(reason)}")
+    end
+  end
+
+  defp apply_settings(settings, []), do: settings
+  # Convert map to keyword list if needed (for compatibility with grpc_core Frame.Settings)
+  defp apply_settings(settings, settings_map) when is_map(settings_map) do
+    apply_settings(settings, Map.to_list(settings_map))
+  end
+
+  defp apply_settings(settings, [{:header_table_size, value} | rest]) do
+    apply_settings(%{settings | 
header_table_size: value}, rest)
+  end
+
+  defp apply_settings(settings, [{:enable_push, value} | rest]) do
+    apply_settings(%{settings | enable_push: value}, rest)
+  end
+
+  defp apply_settings(settings, [{:max_concurrent_streams, value} | rest]) do
+    apply_settings(%{settings | max_concurrent_streams: value}, rest)
+  end
+
+  defp apply_settings(settings, [{:initial_window_size, value} | rest]) do
+    if value > 2_147_483_647 do
+      connection_error!("Invalid initial window size (RFC9113§6.5.2)")
+    end
+
+    apply_settings(%{settings | initial_window_size: value}, rest)
+  end
+
+  defp apply_settings(settings, [{:max_frame_size, value} | rest]) do
+    if value < 16_384 or value > 16_777_215 do
+      connection_error!("Invalid max frame size (RFC9113§6.5.2)")
+    end
+
+    apply_settings(%{settings | max_frame_size: value}, rest)
+  end
+
+  defp apply_settings(settings, [{:max_header_list_size, value} | rest]) do
+    apply_settings(%{settings | max_header_list_size: value}, rest)
+  end
+
+  defp apply_settings(settings, [_unknown | rest]) do
+    # Ignore unknown settings per RFC9113§6.5.2
+    apply_settings(settings, rest)
+  end
+
+  # Serializes `frame` (respecting the peer's max_frame_size) and writes the
+  # resulting iodata to the socket in one call. Skips the write when socket is
+  # nil (test mode). Always returns :ok.
+  defp send_frame(frame, socket, connection) do
+    # Fixed: interpolation previously ended with a stray extra "}" that leaked
+    # into every [SEND_FRAME] log line.
+    Logger.debug(
+      "[SEND_FRAME] type=#{inspect(frame.__struct__)}, stream=#{Map.get(frame, :stream_id, :none)}, flags=#{inspect(Map.get(frame, :flags, []))}"
+    )
+
+    max_frame_size = connection.remote_settings.max_frame_size
+    iodata = Frame.serialize(frame, max_frame_size)
+
+    # Send all frame data at once (iodata is already properly formatted)
+    # Skip sending if socket is nil (test mode)
+    if socket != nil do
+      ThousandIsland.Socket.send(socket, iodata)
+    end
+
+    :ok
+  end
+
+  # Raises Errors.ConnectionError; callers rely on this aborting frame handling.
+  defp connection_error!(message) do
+    raise Errors.ConnectionError, message
+  end
+
+  # Process a complete gRPC request (called when DATA arrives)
+  # For bidi streaming, this may be called multiple times as messages arrive
+  defp process_grpc_request(socket, stream_state, connection, end_stream) do
+    Logger.debug(
+      "[process_grpc_request] 
Processing gRPC call: #{stream_state.path} (end_stream=#{end_stream}, bidi=#{stream_state.is_bidi_streaming})"
+    )
+
+    # For bidi streaming that's already started, feed new messages to the BidiStream
+    if stream_state.is_bidi_streaming and stream_state.handler_started do
+      Logger.debug("[process_grpc_request] Bidi stream already started, feeding new messages")
+      stream_state = extract_messages_from_buffer(stream_state)
+
+      # Feed messages to the BidiStream Task after decoding them
+      if length(stream_state.message_buffer) > 0 and stream_state.bidi_stream_pid do
+        Logger.info(
+          "[Connection] Decoding and feeding #{length(stream_state.message_buffer)} messages to BidiStream #{stream_state.stream_id}, pid=#{inspect(stream_state.bidi_stream_pid)}"
+        )
+
+        # Decode the messages using codec, compressor, and RPC from stream_state
+        decoded_messages = decode_stream_messages(stream_state.message_buffer, stream_state)
+
+        GRPC.Server.BidiStream.put_messages(stream_state.bidi_stream_pid, decoded_messages)
+        # NOTE(review): this rebinding is scoped to the enclosing `if` and is
+        # deliberately discarded (`_stream_state`); the outer stream_state —
+        # whose data_buffer was already pruned by extract_messages_from_buffer
+        # — is what gets stored below. Clearing data_buffer here would drop a
+        # trailing partial message, so the discard preserves it.
+        _stream_state = %{stream_state | message_buffer: [], data_buffer: <<>>}
+      end
+
+      # If END_STREAM, mark the BidiStream as finished
+      if end_stream and stream_state.bidi_stream_pid do
+        Logger.info(
+          "[Connection] Marking bidi stream #{stream_state.stream_id} as finished, pid=#{inspect(stream_state.bidi_stream_pid)}"
+        )
+
+        GRPC.Server.BidiStream.finish(stream_state.bidi_stream_pid)
+      end
+
+      # Update stream state
+      streams = Map.put(connection.streams, stream_state.stream_id, stream_state)
+      %{connection | streams: streams}
+    else
+      # First time processing (or non-bidi) - start the handler
+      process_grpc_request_initial(socket, stream_state, connection, end_stream)
+    end
+  end
+
+  # First dispatch for a stream: extracts buffered messages, invokes the
+  # Dispatcher, sends the response/error frames, and prunes finished streams.
+  defp process_grpc_request_initial(socket, stream_state, connection, end_stream) do
+    Logger.debug("[process_grpc_request_initial] Starting gRPC handler")
+
+    try do
+      # Extract messages from data_buffer
+      stream_state = extract_messages_from_buffer(stream_state)
+
+      # Mark handler as started for bidi
+      stream_state =
+        if stream_state.is_bidi_streaming do
+          %{stream_state | handler_started: true}
+        else
+          stream_state
+        end
+
+      # Update stream in connection before dispatching
+      connection = %{
+        connection
+        | streams: Map.put(connection.streams, stream_state.stream_id, stream_state)
+      }
+
+      # Use dispatcher to handle the gRPC call
+      Logger.debug("[process_grpc_request_initial] Dispatching gRPC call: #{stream_state.path}")
+
+      Logger.debug(
+        "[process_grpc_request] Message buffer: #{length(stream_state.message_buffer)} messages"
+      )
+
+      updated_connection =
+        case GRPC.Server.HTTP2.Dispatcher.dispatch(
+               stream_state,
+               connection.servers,
+               connection.endpoint,
+               connection
+             ) do
+          {:ok, :streaming_done} ->
+            # Streaming was handled incrementally via messages, nothing more to send
+            Logger.debug("[process_grpc_request] Streaming completed")
+
+            # For bidi streaming, update stream_state with full state from process dictionary
+            # (includes bidi_stream_pid, codec, compressor, rpc)
+            if stream_state.is_bidi_streaming do
+              updated_stream_state = Process.get({:bidi_stream_state, stream_state.stream_id})
+
+              Logger.info(
+                "[Connection] Updated stream_state for stream #{stream_state.stream_id}: bidi_pid=#{inspect(updated_stream_state.bidi_stream_pid)}"
+              )
+
+              %{
+                connection
+                | streams:
+                    Map.put(connection.streams, stream_state.stream_id, updated_stream_state)
+              }
+            else
+              connection
+            end
+
+          {:ok, response_headers, response_data, trailers} ->
+            Logger.debug("[process_grpc_request] RPC succeeded, sending response")
+            # OPTIMIZATION: Send all frames in one syscall using iolist
+            send_grpc_response_batch(
+              socket,
+              stream_state.stream_id,
+              response_headers,
+              response_data,
+              trailers,
+              connection
+            )
+
+          {:error, %GRPC.RPCError{} = error} ->
+            Logger.error("[process_grpc_request] RPC error: #{inspect(error)}")
+            # Check if stream still exists (might have been removed by concurrent error handling)
+            if Map.has_key?(connection.streams, stream_state.stream_id) do
+              updated_connection =
+                send_grpc_error(socket, stream_state.stream_id, error, connection)
+
+              # Remove stream immediately after error and return (don't continue processing)
+              Logger.info(
+                "[process_grpc_request] Removing stream #{stream_state.stream_id} after error"
+              )
+
+              %{
+                updated_connection
+                | streams: Map.delete(updated_connection.streams, stream_state.stream_id)
+              }
+            else
+              Logger.warning(
+                "[process_grpc_request] Stream #{stream_state.stream_id} already removed, skipping error send"
+              )
+
+              connection
+            end
+        end
+
+      # Check if stream still exists (it may have been removed by error handling)
+      stream_exists = Map.has_key?(updated_connection.streams, stream_state.stream_id)
+
+      # For bidi streaming, keep the stream alive to receive more messages
+      # But if it's the end of the stream (client sent END_STREAM), clean up
+      if stream_state.is_bidi_streaming and not end_stream and stream_exists do
+        Logger.debug("[process_grpc_request] Keeping bidi stream #{stream_state.stream_id} alive")
+        updated_connection
+      else
+        # If stream doesn't exist, it was already removed (e.g., by send_grpc_error)
+        if not stream_exists do
+          updated_connection
+        else
+          # If bidi streaming and END_STREAM, mark as finished
+          if stream_state.is_bidi_streaming and end_stream do
+            # Get bidi_stream_pid from updated connection state
+            updated_stream_state = updated_connection.streams[stream_state.stream_id]
+
+            if updated_stream_state && updated_stream_state.bidi_stream_pid do
+              Logger.info(
+                "[Connection] Marking bidi stream #{stream_state.stream_id} as finished (initial END_STREAM), pid=#{inspect(updated_stream_state.bidi_stream_pid)}"
+              )
+
+              GRPC.Server.BidiStream.finish(updated_stream_state.bidi_stream_pid)
+            end
+          end
+
+          # Remove stream from connection
+          Logger.debug("[process_grpc_request] Removing stream #{stream_state.stream_id}")
+
+          %{
+            updated_connection
+            | streams: Map.delete(updated_connection.streams, stream_state.stream_id)
+          }
+        end
+      end
+    rescue
+      e ->
+        Logger.error("[process_grpc_request] Exception: #{inspect(e)}")
+
+        Logger.error(
+          "[process_grpc_request] Stacktrace:\n#{Exception.format_stacktrace(__STACKTRACE__)}"
+        )
+
+        # Check if stream still exists (might have been removed by concurrent error handling)
+        if Map.has_key?(connection.streams, stream_state.stream_id) do
+          updated_connection =
+            send_grpc_error(
+              socket,
+              stream_state.stream_id,
+              %{status: :internal, message: "Internal error"},
+              connection
+            )
+
+          %{
+            updated_connection
+            | streams: Map.delete(updated_connection.streams, stream_state.stream_id)
+          }
+        else
+          Logger.warning(
+            "[process_grpc_request] Stream #{stream_state.stream_id} already removed in rescue, skipping error send"
+          )
+
+          connection
+        end
+    end
+  end
+
+  # Encodes `trailers` via HPACK and emits one HEADERS frame flagged
+  # END_STREAM + END_HEADERS; returns the connection with the new HPACK state.
+  defp send_grpc_trailers(socket, stream_id, trailers, connection) do
+    Logger.debug(
+      "[send_grpc_trailers] Sending trailers for stream #{stream_id}: #{inspect(trailers)}"
+    )
+
+    # Convert map to list of tuples for HPAX
+    trailer_list = Map.to_list(trailers)
+
+    # Encode trailers using HPACK
+    {trailer_block, new_hpack} = HPAX.encode(:no_store, trailer_list, connection.send_hpack_state)
+
+    # Send HEADERS frame with END_STREAM flag
+    headers_frame = %Frame.Headers{
+      stream_id: stream_id,
+      fragment: trailer_block,
+      end_stream: true,
+      end_headers: true
+    }
+
+    send_frame(headers_frame, socket, connection)
+
+    # Return updated connection with new HPACK state
+    %{connection | send_hpack_state: new_hpack}
+  end
+
+  # OPTIMIZATION: Send headers + data + trailers in one syscall (Bandit-style batching)
+  defp send_grpc_response_batch(
+         socket,
+         stream_id,
+         response_headers,
+         response_data,
+         trailers,
+         connection
+       ) do
+    max_frame_size = connection.remote_settings.max_frame_size
+
+    # Encode headers frame if provided
+    {headers_iodata, hpack_after_headers} =
+      if response_headers && response_headers != [] do
+        header_list =
+          if is_map(response_headers), do: Map.to_list(response_headers), else: response_headers
+
+        {encoded_headers, new_hpack} =
+          HPAX.encode(:no_store, header_list, connection.send_hpack_state)
+
+        headers_frame = %Frame.Headers{
+          stream_id: stream_id,
+          fragment: encoded_headers,
+          end_stream: false,
+          end_headers: true
+        }
+
+        {Frame.serialize(headers_frame, max_frame_size), new_hpack}
+      else
+        {[], connection.send_hpack_state}
+      end
+
+    # Encode data frame
+    data_frame = %Frame.Data{
+      stream_id: stream_id,
+      data: response_data,
+      end_stream: false
+    }
+
+    data_iodata = Frame.serialize(data_frame, max_frame_size)
+
+    # Encode trailers frame
+    trailer_list = Map.to_list(trailers)
+    {trailer_block, final_hpack} = HPAX.encode(:no_store, trailer_list, hpack_after_headers)
+
+    trailers_frame = %Frame.Headers{
+      stream_id: stream_id,
+      fragment: trailer_block,
+      end_stream: true,
+      end_headers: true
+    }
+
+    trailers_iodata = Frame.serialize(trailers_frame, max_frame_size)
+
+    # Combine all frames into one iolist and send in single syscall
+    combined_iodata = [headers_iodata, data_iodata, trailers_iodata]
+
+    # Skip sending if socket is nil (test mode)
+    if socket != nil do
+      ThousandIsland.Socket.send(socket, combined_iodata)
+    end
+
+    # Return updated connection with new HPACK state
+    %{connection | send_hpack_state: final_hpack}
+  end
+
+  # Splits data_buffer into complete length-prefixed messages (message_buffer)
+  # and keeps any trailing partial message bytes in data_buffer.
+  defp extract_messages_from_buffer(stream_state) do
+    # Extract 5-byte length-prefixed messages from data_buffer
+    Logger.info(
+      "[Connection] Extracting messages from data_buffer (#{byte_size(stream_state.data_buffer)} bytes)"
+    )
+
+    {messages, remaining} = extract_messages(stream_state.data_buffer, [])
+    # Reverse since we build list backwards for performance
+    extracted_count = length(messages)
+
+    Logger.info(
+      "[Connection] Extracted #{extracted_count} messages, #{byte_size(remaining)} bytes remaining"
+    )
+
+    %{stream_state | message_buffer: Enum.reverse(messages), data_buffer: remaining}
+  end
+
+  # Optimized: prepend instead 
of append for O(1) instead of O(n)
+  # gRPC length-prefixed message: 1-byte compressed flag, 4-byte big-endian
+  # length, then `length` bytes of payload (mirrors has_complete_message?/1).
+  # Fixed: the binary pattern below had been corrupted to a bare `<>`.
+  defp extract_messages(
+         <<compressed::8, length::32, payload::binary-size(length), rest::binary>>,
+         acc
+       ) do
+    message = %{compressed: compressed == 1, data: payload}
+    extract_messages(rest, [message | acc])
+  end
+
+  defp extract_messages(buffer, acc) do
+    # Not enough data for a complete message
+    {acc, buffer}
+  end
+
+  # Decompresses (when flagged) and codec-decodes each buffered message into
+  # the RPC's request struct.
+  defp decode_stream_messages(message_buffer, stream_state) do
+    # Extract request type from RPC definition
+    # RPC format: {name, {request_module, is_stream?}, {reply_module, is_stream?}, options}
+    {_name, {request_module, _is_stream?}, _reply, _options} = stream_state.rpc
+    codec = stream_state.codec
+    compressor = stream_state.compressor
+
+    Enum.map(message_buffer, fn %{compressed: compressed?, data: data} ->
+      # Decompress if needed
+      data =
+        if compressed? and compressor do
+          compressor.decompress(data)
+        else
+          data
+        end
+
+      # Decode protobuf with the request module
+      codec.decode(data, request_module)
+    end)
+  end
+
+  # Made public so Handler can call it when deadline exceeded during send_reply
+  def send_grpc_error(socket, stream_id, error, connection) do
+    status = Map.get(error, :status, :unknown)
+    message = Map.get(error, :message, "Unknown error")
+
+    # Convert atom status to integer code
+    status_code = if is_atom(status), do: apply(GRPC.Status, status, []), else: status
+
+    stream_state = Map.get(connection.streams, stream_id)
+
+    # If the stream does not exist OR we already sent an error, do nothing
+    if !stream_state do
+      Logger.warning("[SEND_GRPC_ERROR] stream=#{stream_id} SKIPPED - stream not found")
+      connection
+    else
+      if stream_state.error_sent do
+        Logger.warning("[SEND_GRPC_ERROR] stream=#{stream_id} SKIPPED - error already sent")
+        connection
+      else
+        headers_sent = stream_state.headers_sent
+        end_stream_received = stream_state.end_stream_received
+
+        Logger.warning(
+          "[SEND_GRPC_ERROR] stream=#{stream_id}, status=#{status_code}, message=#{message}, headers_sent=#{headers_sent}, end_stream_received=#{end_stream_received}"
+        )
+
+        # RFC 7540 Section 5.1: 
If we already sent END_STREAM (via headers_sent=true in a previous response),
+        # the stream is "closed" and we must NOT send any more frames (except PRIORITY).
+        # In that case, just remove the stream and send nothing.
+        if headers_sent && end_stream_received do
+          Logger.warning(
+            "[SEND_GRPC_ERROR] stream=#{stream_id} SKIPPED - stream is fully closed (both sides sent END_STREAM)"
+          )
+
+          %{connection | streams: Map.delete(connection.streams, stream_id)}
+        else
+          # Check if headers were already sent for this stream
+
+          # If headers not sent yet AND stream hasn't received END_STREAM, send HTTP/2 headers first
+          # If stream received END_STREAM, it's half-closed remote - we can only send TRAILERS
+          updated_connection =
+            if !headers_sent && !end_stream_received do
+              Logger.warning(
+                "[SEND_GRPC_ERROR] Sending HTTP/2 headers first for stream=#{stream_id}"
+              )
+
+              headers = %{":status" => "200", "content-type" => "application/grpc+proto"}
+              send_headers(socket, stream_id, headers, connection)
+
+              # Mark headers as sent in the stream state
+              updated_stream = %{stream_state | headers_sent: true}
+              updated_conn = put_in(connection.streams[stream_id], updated_stream)
+
+              trailers = %{
+                "grpc-status" => to_string(status_code),
+                "grpc-message" => message
+              }
+
+              Logger.warning(
+                "[SEND_GRPC_ERROR] Sending TRAILERS with END_STREAM for stream=#{stream_id}"
+              )
+
+              send_grpc_trailers(socket, stream_id, trailers, updated_conn)
+            else
+              # If headers were sent OR stream received END_STREAM, just send trailers
+              if end_stream_received && !headers_sent do
+                Logger.warning(
+                  "[SEND_GRPC_ERROR] Stream #{stream_id} received END_STREAM, skipping HEADERS, sending only TRAILERS"
+                )
+              end
+
+              trailers = %{
+                "grpc-status" => to_string(status_code),
+                "grpc-message" => message
+              }
+
+              Logger.warning(
+                "[SEND_GRPC_ERROR] Sending TRAILERS with END_STREAM for stream=#{stream_id}"
+              )
+
+              send_grpc_trailers(socket, stream_id, trailers, connection)
+            end
+
+          # Mark that we already sent an error (RFC 7540: after END_STREAM, the stream is closed)
+          updated_connection =
+            update_in(updated_connection.streams[stream_id], fn s ->
+              if s, do: %{s | error_sent: true}, else: nil
+            end)
+
+          # RFC 7540: after sending END_STREAM, the stream transitions to "closed".
+          # Remove it immediately to avoid processing more messages on this stream.
+          Logger.warning("[REMOVE_STREAM] stream=#{stream_id} - removed after sending error")
+          %{updated_connection | streams: Map.delete(updated_connection.streams, stream_id)}
+        end
+      end
+    end
+  end
+
+  # Check if buffer has at least one complete gRPC message
+  # gRPC message format: 1 byte compressed flag + 4 bytes length + N bytes data
+  defp has_complete_message?(buffer) when byte_size(buffer) < 5, do: false
+
+  defp has_complete_message?(<<_compressed::8, length::32, rest::binary>>) do
+    byte_size(rest) >= length
+  end
+end
diff --git a/grpc_server/lib/grpc/server/http2/dispatcher.ex b/grpc_server/lib/grpc/server/http2/dispatcher.ex
new file mode 100644
index 000000000..6cfb9b9dc
--- /dev/null
+++ b/grpc_server/lib/grpc/server/http2/dispatcher.ex
@@ -0,0 +1,821 @@
+defmodule GRPC.Server.HTTP2.Dispatcher do
+  @moduledoc """
+  Dispatches gRPC calls to registered services.
+
+  This module:
+  - Parses gRPC path ("/package.Service/Method")
+  - Looks up service implementation from servers registry
+  - Decodes protobuf request messages
+  - Calls service handler functions
+  - Encodes protobuf response messages
+  - Handles streaming (client/server/bidirectional)
+  """
+
+  require Logger
+
+  alias GRPC.Server.Cache
+  alias GRPC.Server.HTTP2.StreamState
+  alias GRPC.RPCError
+
+  # Inline hot path functions
+  @compile {:inline,
+            parse_path: 1,
+            lookup_server: 2,
+            lookup_rpc: 2,
+            get_codec: 2,
+            get_compressor: 2,
+            encode_response: 4,
+            decode_messages: 4}
+
+  @doc """
+  Dispatches a gRPC call to the appropriate service. 
+
+  ## Parameters
+  - `stream_state`: HTTP/2 stream with decoded headers
+  - `servers`: Map of service name => server module
+  - `endpoint`: The endpoint module (for telemetry, etc)
+
+  ## Returns
+  - `{:ok, response_headers, response_data, trailers}` - Success
+  - `{:error, rpc_error}` - gRPC error to send to client
+  """
+  @spec dispatch(StreamState.t(), map(), atom(), GRPC.Server.HTTP2.Connection.t()) ::
+          {:ok, list(), binary(), map()} | {:error, GRPC.RPCError.t()}
+  def dispatch(%StreamState{} = stream_state, servers, endpoint, connection) do
+    Logger.info(
+      "[dispatch] path=#{stream_state.path}, messages=#{length(stream_state.message_buffer)}"
+    )
+
+    # Check deadline BEFORE processing - if exceeded, return error immediately
+    if StreamState.deadline_exceeded?(stream_state) do
+      now = System.monotonic_time(:microsecond)
+
+      Logger.warning(
+        "[dispatch] Deadline exceeded for path=#{stream_state.path}, deadline=#{stream_state.deadline}, now=#{now}, diff=#{now - (stream_state.deadline || now)}us"
+      )
+
+      {:error, GRPC.RPCError.exception(status: :deadline_exceeded, message: "Deadline exceeded")}
+    else
+      with {:ok, service_name, method_name} <- parse_path(stream_state.path),
+           {:ok, server} <- lookup_server(servers, service_name),
+           {:ok, rpc} <- lookup_rpc(server, method_name),
+           {:ok, codec} <- get_codec(server, stream_state.content_type),
+           {:ok, compressor} <- get_compressor(server, stream_state.metadata),
+           {:ok, requests} <-
+             decode_messages(stream_state.message_buffer, rpc, codec, compressor),
+           {:ok, response} <-
+             call_service(
+               server,
+               rpc,
+               method_name,
+               requests,
+               stream_state,
+               endpoint,
+               codec,
+               compressor,
+               connection
+             ) do
+        Logger.info("[dispatch] Encoding response")
+        # Get custom headers/trailers from process dictionary
+        # (set by handler during execution via set_headers/set_trailers)
+        custom_headers = Process.get({:grpc_custom_headers, stream_state.stream_id}, %{})
+        custom_trailers = Process.get({:grpc_custom_trailers, stream_state.stream_id}, %{})
+
+        # Update stream_state with custom headers/trailers
+        updated_stream_state = %{
+          stream_state
+          | custom_headers: custom_headers,
+            custom_trailers: custom_trailers
+        }
+
+        # Cleanup process dictionary
+        Process.delete({:grpc_custom_headers, stream_state.stream_id})
+        Process.delete({:grpc_custom_trailers, stream_state.stream_id})
+
+        # Encode response(s) - could be single response or list for streaming
+        # Pass updated stream_state to include custom headers/trailers
+        encode_responses(response, codec, compressor, rpc, updated_stream_state)
+      else
+        # Fixed: this clause previously destructured the tuple and returned the
+        # bare %GRPC.RPCError{} struct, violating the @spec and breaking the
+        # caller's `{:error, %GRPC.RPCError{} = error}` case clause in
+        # process_grpc_request_initial. Keep the {:error, _} tag intact.
+        {:error, %GRPC.RPCError{}} = error ->
+          error
+
+        {:error, _reason} = err ->
+          Logger.error("Dispatch error: #{inspect(err)}")
+          {:error, GRPC.RPCError.exception(status: :internal, message: "Internal server error")}
+      end
+    end
+
+    # Closes the deadline_exceeded? if
+  end
+
+  @doc """
+  Parses gRPC path into service and method names.
+
+  Examples:
+  - "/helloworld.Greeter/SayHello" → {"helloworld.Greeter", "SayHello"}
+  - "/package.subpackage.Service/Method" → {"package.subpackage.Service", "Method"}
+  """
+  @spec parse_path(String.t()) :: {:ok, String.t(), String.t()} | {:error, RPCError.t()}
+  def parse_path("/" <> rest) do
+    case String.split(rest, "/", parts: 2) do
+      [service_name, method_name] when service_name != "" and method_name != "" ->
+        {:ok, service_name, method_name}
+
+      _ ->
+        {:error, RPCError.exception(status: :unimplemented, message: "Invalid path format")}
+    end
+  end
+
+  def parse_path(_) do
+    {:error, RPCError.exception(status: :unimplemented, message: "Path must start with /")}
+  end
+
+  @doc """
+  Checks if a gRPC path corresponds to a bidirectional streaming RPC.
+
+  Returns `true` if both request and response are streaming, `false` otherwise.
+  """
+  @spec is_bidi_streaming?(String.t(), map()) :: boolean()
+  def is_bidi_streaming?(path, servers) do
+    with {:ok, service_name, method_name} <- parse_path(path),
+         {:ok, server} <- lookup_server(servers, service_name),
+         {:ok, rpc} <- lookup_rpc(server, method_name) do
+      {_name, {_req_mod, req_stream?}, {_res_mod, res_stream?}, _opts} = rpc
+      req_stream? and res_stream?
+    else
+      _ -> false
+    end
+  end
+
+  ## Private Functions
+  defp lookup_server(servers, service_name) do
+    case Map.get(servers, service_name) do
+      nil ->
+        {:error,
+         RPCError.exception(status: :unimplemented, message: "Service not found: #{service_name}")}
+
+      server ->
+        {:ok, server}
+    end
+  end
+
+  defp lookup_rpc(server, method_name) do
+    # Use cache to find RPC definition
+    case GRPC.Server.Cache.find_rpc(server, method_name) do
+      nil ->
+        {:error,
+         RPCError.exception(
+           status: :unimplemented,
+           message: "Method not found: #{method_name}"
+         )}
+
+      rpc ->
+        {:ok, rpc}
+    end
+  end
+
+  defp get_codec(server, content_type) do
+    # Extract codec subtype from content-type
+    # "application/grpc+proto" → "proto"
+    # "application/grpc+json" → "json"
+    subtype =
+      case String.split(content_type, "+", parts: 2) do
+        ["application/grpc", subtype] -> subtype
+        ["application/grpc"] -> "proto"
+        _ -> "proto"
+      end
+
+    case Cache.find_codec(server, subtype) do
+      nil ->
+        {:error,
+         RPCError.exception(status: :unimplemented, message: "Codec not found: #{subtype}")}
+
+      codec ->
+        {:ok, codec}
+    end
+  end
+
+  defp get_compressor(server, metadata) do
+    # Check grpc-encoding header
+    encoding = Map.get(metadata, "grpc-encoding", "identity")
+
+    if encoding == "identity" do
+      {:ok, nil}
+    else
+      case Cache.find_compressor(server, encoding) do
+        nil ->
+          {:error,
+           RPCError.exception(
+             status: :unimplemented,
+             message: "Compressor not found: #{encoding}"
+           )}
+
+        compressor ->
+          {:ok, compressor}
+      end
+    end
+  end
+
+  # Decompresses (when flagged) and codec-decodes every buffered message into
+  # the RPC's request struct; decode failures map to :invalid_argument.
+  defp decode_messages(message_buffer, rpc, codec, compressor) do
+    # Extract request type from RPC definition
+    # RPC format: {name, {request_module, is_stream?}, {reply_module, is_stream?}, options}
+    {_name, {request_module, _is_stream?}, _reply, _options} = rpc
+
+    try do
+      messages =
+        Enum.map(message_buffer, fn %{compressed: compressed?, data: data} ->
+          # Decompress if needed
+          data =
+            if compressed? and compressor do
+              compressor.decompress(data)
+            else
+              data
+            end
+
+          # Decode protobuf with the request module
+          codec.decode(data, request_module)
+        end)
+
+      {:ok, messages}
+    rescue
+      e ->
+        Logger.error("Failed to decode messages: #{inspect(e)}")
+
+        {:error,
+         RPCError.exception(status: :invalid_argument, message: "Invalid request message")}
+    end
+  end
+
+  defp call_unary(server, func_name, request, stream) do
+    Logger.info("[call_unary] Calling #{inspect(server)}.#{func_name}")
+
+    # Check if function is implemented
+    if function_exported?(server, func_name, 2) do
+      # Accumulate base headers (don't send yet - handler may add custom headers)
+      base_headers = %{
+        ":status" => "200",
+        "content-type" => "application/grpc+proto"
+      }
+
+      # Add grpc-encoding if there's a compressor
+      base_headers =
+        if stream.compressor do
+          Map.put(base_headers, "grpc-encoding", stream.compressor.name())
+        else
+          base_headers
+        end
+
+      GRPC.Server.set_headers(stream, base_headers)
+
+      try do
+        # Call handler and get response
+        response = apply(server, func_name, [request, stream])
+        Logger.info("[call_unary] Response received, sending via send_reply")
+
+        # Send response using async message (this will send accumulated headers first)
+        GRPC.Server.send_reply(stream, response)
+
+        # Get custom trailers from process dictionary (set by handler via set_trailers)
+        stream_id = stream.payload.stream_id
+        custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{})
+
+        # Merge with mandatory grpc-status
+        trailers = Map.merge(%{"grpc-status" => "0"}, custom_trailers)
+
+        # Send trailers at the end with END_STREAM
+        GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers)
+
+        # Return special marker for async handling (like server_streaming)
+        {:ok, :streaming_done}
+      rescue
+        e in GRPC.RPCError ->
+          # Send error as trailers (headers already accumulated, will be sent with trailers)
+          stream_id = stream.payload.stream_id
+
+          error_trailers = %{
+            "grpc-status" => "#{e.status}",
+            "grpc-message" => e.message || ""
+          }
+
+          # Get custom trailers from process dictionary
+          custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{})
+          trailers = Map.merge(error_trailers, custom_trailers)
+
+          # Send trailers with error (will send accumulated headers first if not sent yet)
+          GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers)
+
+          # Return streaming_done (error already sent)
+          {:ok, :streaming_done}
+
+        e ->
+          Logger.error("Handler error: #{Exception.message(e)}")
+
+          error_trailers = %{
+            # UNKNOWN
+            "grpc-status" => "2",
+            "grpc-message" => Exception.message(e)
+          }
+
+          # Send trailers with error
+          GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers)
+
+          # Return streaming_done (error already sent)
+          {:ok, :streaming_done}
+      end
+    else
+      # Function not implemented
+      Logger.error("Function #{inspect(server)}.#{func_name}/2 is not implemented")
+
+      # Send required HTTP/2 headers first
+      headers = %{
+        ":status" => "200",
+        "content-type" => "application/grpc+proto"
+      }
+
+      GRPC.Server.Adapters.ThousandIsland.send_headers(stream.payload, headers)
+
+      # Then send error trailers
+      error_trailers = %{
+        # UNIMPLEMENTED
+        "grpc-status" => "12",
+        "grpc-message" => "Method not implemented"
+      }
+
+      GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers)
+      {:ok, :streaming_done}
+    end
+  end
+
+  defp call_client_streaming(server, func_name, requests, stream) do
+    # Check if function is implemented
+    if function_exported?(server, func_name, 2) do
+      try do
+        # Accumulate base headers (don't
send yet - handler may add custom headers) + base_headers = %{ + ":status" => "200", + "content-type" => "application/grpc+proto" + } + + # Add grpc-encoding if there's a compressor + base_headers = + if stream.compressor do + Map.put(base_headers, "grpc-encoding", stream.compressor.name()) + else + base_headers + end + + # Accumulate base headers without sending + GRPC.Server.set_headers(stream, base_headers) + + # Convert list to stream + request_enum = Enum.into(requests, []) + response = apply(server, func_name, [request_enum, stream]) + + # Send response using async message (this will send accumulated headers first) + GRPC.Server.send_reply(stream, response) + + # Get custom trailers from process dictionary (set by handler via set_trailers) + stream_id = stream.payload.stream_id + custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) + + # Merge with mandatory grpc-status + trailers = Map.merge(%{"grpc-status" => "0"}, custom_trailers) + + # Send trailers at the end with END_STREAM + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + + # Return special marker for async handling (like server_streaming) + {:ok, :streaming_done} + rescue + e in GRPC.RPCError -> + # Send error as trailers + stream_id = stream.payload.stream_id + + error_trailers = %{ + "grpc-status" => "#{e.status}", + "grpc-message" => e.message || "" + } + + custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) + trailers = Map.merge(error_trailers, custom_trailers) + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + {:ok, :streaming_done} + + e -> + Logger.error("Handler error: #{Exception.message(e)}") + + error_trailers = %{ + # UNKNOWN + "grpc-status" => "2", + "grpc-message" => Exception.message(e) + } + + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers) + {:ok, :streaming_done} + end + else + # Function not implemented + Logger.error("Function 
#{inspect(server)}.#{func_name}/2 is not implemented") + + # Send required HTTP/2 headers first + headers = %{ + ":status" => "200", + "content-type" => "application/grpc+proto" + } + + GRPC.Server.Adapters.ThousandIsland.send_headers(stream.payload, headers) + + # Then send error trailers + error_trailers = %{ + # UNIMPLEMENTED + "grpc-status" => "12", + "grpc-message" => "Method not implemented" + } + + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers) + {:ok, :streaming_done} + end + end + + defp call_server_streaming(server, func_name, request, stream) do + # Check if function is implemented + if function_exported?(server, func_name, 2) do + try do + # Accumulate base headers (don't send yet - handler may add custom headers) + base_headers = %{ + ":status" => "200", + "content-type" => "application/grpc+proto" + } + + # Add grpc-encoding if there's a compressor + base_headers = + if stream.compressor do + Map.put(base_headers, "grpc-encoding", stream.compressor.name()) + else + base_headers + end + + GRPC.Server.set_headers(stream, base_headers) + + # Handler calls GRPC.Server.send_reply for each response + apply(server, func_name, [request, stream]) + + # Get custom trailers from process dictionary (set by handler via set_trailers) + stream_id = stream.payload.stream_id + custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) + + # Merge with mandatory grpc-status + trailers = Map.merge(%{"grpc-status" => "0"}, custom_trailers) + + # Send trailers at the end with END_STREAM + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + + # Return special marker for streaming (not data to send) + {:ok, :streaming_done} + rescue + e in GRPC.RPCError -> + # Send error as trailers + stream_id = stream.payload.stream_id + + error_trailers = %{ + "grpc-status" => "#{e.status}", + "grpc-message" => e.message || "" + } + + custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) + trailers 
= Map.merge(error_trailers, custom_trailers) + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + {:ok, :streaming_done} + + e -> + Logger.error("Handler error: #{Exception.message(e)}") + + error_trailers = %{ + # UNKNOWN + "grpc-status" => "2", + "grpc-message" => Exception.message(e) + } + + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers) + {:ok, :streaming_done} + end + else + # Function not implemented + Logger.error("Function #{inspect(server)}.#{func_name}/2 is not implemented") + + # Send required HTTP/2 headers first + headers = %{ + ":status" => "200", + "content-type" => "application/grpc+proto" + } + + GRPC.Server.Adapters.ThousandIsland.send_headers(stream.payload, headers) + + # Then send error trailers + error_trailers = %{ + # UNIMPLEMENTED + "grpc-status" => "12", + "grpc-message" => "Method not implemented" + } + + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers) + {:ok, :streaming_done} + end + end + + defp call_bidi_streaming(server, func_name, requests, stream, _connection) do + # Check if function is implemented + if function_exported?(server, func_name, 2) do + stream_id = stream.payload.stream_state.stream_id + + Logger.info( + "[call_bidi_streaming] Starting bidi stream #{stream_id} with #{length(requests)} initial requests" + ) + + try do + # Mark as streaming mode so send_headers will send immediately + Process.put(:grpc_streaming_mode, true) + + # Start BidiStream Task with initial messages + {:ok, bidi_pid} = GRPC.Server.BidiStream.start_link(stream_id, requests) + + Logger.info( + "[call_bidi_streaming] BidiStream task started for stream #{stream_id}, pid=#{inspect(bidi_pid)}" + ) + + # Monitor the BidiStream task to detect early termination + ref = Process.monitor(bidi_pid) + Logger.debug("[call_bidi_streaming] Monitoring BidiStream task with ref #{inspect(ref)}") + + # Store the BidiStream PID in stream_state for later use + # (when more 
DATA frames arrive) + stream_state = stream.payload.stream_state + updated_stream_state = %{stream_state | bidi_stream_pid: bidi_pid, handler_started: true} + + # CRITICAL: Store in process dictionary for immediate access + # We can't use send() because handler is blocked in this dispatch call! + _handler_pid = stream.payload.handler_pid + + Logger.info( + "[Dispatcher] Storing bidi_pid #{inspect(bidi_pid)} in process dictionary for stream #{stream_id}" + ) + + # Store codec, compressor, and RPC in stream_state for decoding subsequent messages + updated_stream_state = %{ + updated_stream_state + | codec: stream.codec, + compressor: stream.compressor, + rpc: stream.rpc + } + + # Store both PID and full state in process dictionary + Process.put({:bidi_stream_pid, stream_id}, bidi_pid) + Process.put({:bidi_stream_state, stream_id}, updated_stream_state) + + # Create lazy enumerable from BidiStream + request_enum = GRPC.Server.BidiStream.to_enum(bidi_pid) + + # Accumulate base headers (don't send yet - handler may add custom headers) + base_headers = %{ + ":status" => "200", + "content-type" => "application/grpc+proto" + } + + # Add grpc-encoding if there's a compressor + base_headers = + if stream.compressor do + Map.put(base_headers, "grpc-encoding", stream.compressor.name()) + else + base_headers + end + + # Accumulate base headers without sending + GRPC.Server.set_headers(stream, base_headers) + + # CRITICAL: Run handler in a separate task to not block the connection handler + # The connection handler MUST continue processing incoming DATA frames + # and feed them to the BidiStream while the handler is consuming messages + Task.start(fn -> + try do + # Handler receives lazy request stream and sends responses via stream + result = apply(server, func_name, [request_enum, stream]) + Logger.info("[call_bidi_streaming] Handler returned: #{inspect(result)}") + + # Get custom trailers from process dictionary (set by handler via set_trailers) + custom_trailers = 
Process.get({:grpc_custom_trailers, stream_id}, %{}) + + # Merge with mandatory grpc-status + trailers = Map.merge(%{"grpc-status" => "0"}, custom_trailers) + + # Send trailers at the end with END_STREAM + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + rescue + e in GRPC.RPCError -> + Logger.error("[call_bidi_streaming] Handler RPC Error: #{inspect(e)}") + + trailers = %{ + "grpc-status" => "#{e.status}", + "grpc-message" => e.message || "" + } + + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + + e -> + Logger.error( + "[call_bidi_streaming] Handler error: #{Exception.message(e)}\n#{Exception.format_stacktrace(__STACKTRACE__)}" + ) + + trailers = %{ + "grpc-status" => "2", + "grpc-message" => Exception.message(e) + } + + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + end + end) + + # Return special marker for streaming (not data to send) + {:ok, :streaming_done} + rescue + e in GRPC.RPCError -> + Logger.error("[call_bidi_streaming] RPC Error: #{inspect(e)}") + + trailers = %{ + "grpc-status" => "#{e.status}", + "grpc-message" => e.message || "" + } + + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + {:ok, :streaming_done} + + e -> + Logger.error( + "[call_bidi_streaming] Handler error: #{Exception.message(e)}\n#{Exception.format_stacktrace(__STACKTRACE__)}" + ) + + trailers = %{ + "grpc-status" => "2", + "grpc-message" => Exception.message(e) + } + + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + {:ok, :streaming_done} + end + else + # Function not implemented + Logger.error("Function #{inspect(server)}.#{func_name}/2 is not implemented") + + # Send required HTTP/2 headers first + headers = %{ + ":status" => "200", + "content-type" => "application/grpc+proto" + } + + GRPC.Server.Adapters.ThousandIsland.send_headers(stream.payload, headers) + + # Then send error trailers + error_trailers = %{ + # UNIMPLEMENTED + 
"grpc-status" => "12",
        "grpc-message" => "Method not implemented"
      }

      GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers)
      {:ok, :streaming_done}
    end
  end

  # Encode responses - handles both unary (single response) and streaming (special marker).
  defp encode_responses(:streaming_done, _codec, _compressor, _rpc, _stream_state) do
    # Streaming was already handled incrementally, nothing to encode
    {:ok, :streaming_done}
  end

  defp encode_responses(response, codec, compressor, rpc, stream_state) do
    {_name, {_req_mod, _req_stream?}, {_res_mod, res_stream?}, _opts} = rpc

    if res_stream? do
      # This path shouldn't be reached anymore (streaming handled in call_server_streaming)
      Logger.warning("Unexpected: encode_responses called with streaming response")
      {:ok, :streaming_done}
    else
      # Unary - encode single response with custom headers/trailers from stream_state
      encode_response(response, codec, compressor, stream_state)
    end
  end

  # Encodes a single message (without headers/trailers) into gRPC's
  # 5-byte length-prefixed wire format:
  #   byte 0     - compression flag (0 = plain, 1 = compressed)
  #   bytes 1..4 - payload length, unsigned 32-bit big-endian
  #   bytes 5..  - (possibly compressed) encoded payload
  defp encode_message(response, codec, compressor) do
    # Encode protobuf (iodata), then flatten once before compressing/framing
    encoded = codec.encode(response)

    {compressed_flag, data_binary} =
      if compressor do
        {1, compressor.compress(IO.iodata_to_binary(encoded))}
      else
        {0, IO.iodata_to_binary(encoded)}
      end

    length = byte_size(data_binary)

    # Build 5-byte length-prefixed message.
    # NOTE(review): this bitstring literal was reconstructed — the original
    # was lost in the patch artifact ("<>"); format per gRPC PROTOCOL-HTTP2.
    <<compressed_flag::8, length::32, data_binary::binary>>
  end

  defp encode_response(response, codec, compressor, stream_state) do
    try do
      # Encode the message using the helper
      message_data = encode_message(response, codec, compressor)

      # Build response headers
      base_headers = [
        {":status", "200"},
        {"content-type", "application/grpc+proto"}
      ]

      # Only add grpc-encoding if there's actual compression (not identity)
      headers =
        if compressor do
          base_headers ++ [{"grpc-encoding", compressor.name()}]
        else
          base_headers
        end

      # Merge custom
headers from handler (for custom_metadata test) + headers = + if map_size(stream_state.custom_headers) > 0 do + headers ++ Map.to_list(stream_state.custom_headers) + else + headers + end + + # Build trailers (mandatory grpc-status) + trailers = %{ + "grpc-status" => "0" + } + + # Merge custom trailers from handler (for custom_metadata test) + trailers = Map.merge(trailers, stream_state.custom_trailers) + + {:ok, headers, message_data, trailers} + rescue + e -> + Logger.error("Failed to encode response: #{inspect(e)}") + {:error, RPCError.exception(status: :internal, message: "Failed to encode response")} + end + end + + defp call_service( + server, + rpc, + method_name, + requests, + stream_state, + endpoint, + codec, + compressor, + connection + ) do + {_name, {req_mod, req_stream?}, {res_mod, res_stream?}, _opts} = rpc + func_name = Macro.underscore(method_name) |> String.to_atom() + + # Create a payload struct with metadata and handler info for streaming + payload = %{ + headers: stream_state.metadata, + stream_state: stream_state, + handler_pid: stream_state.handler_pid, + stream_id: stream_state.stream_id + } + + grpc_stream = %GRPC.Server.Stream{ + server: server, + endpoint: endpoint, + request_mod: req_mod, + response_mod: res_mod, + rpc: rpc, + codec: codec, + compressor: compressor, + adapter: GRPC.Server.Adapters.ThousandIsland, + payload: payload + } + + case {req_stream?, res_stream?} do + {false, false} -> + [request] = requests + call_unary(server, func_name, request, grpc_stream) + + {true, false} -> + call_client_streaming(server, func_name, requests, grpc_stream) + + {false, true} -> + [request] = requests + call_server_streaming(server, func_name, request, grpc_stream) + + {true, true} -> + call_bidi_streaming(server, func_name, requests, grpc_stream, connection) + end + end +end diff --git a/grpc_server/lib/grpc/server/http2/stream_state.ex b/grpc_server/lib/grpc/server/http2/stream_state.ex new file mode 100644 index 000000000..11673238e --- 
/dev/null +++ b/grpc_server/lib/grpc/server/http2/stream_state.ex @@ -0,0 +1,360 @@ +defmodule GRPC.Server.HTTP2.StreamState do + @moduledoc """ + Manages individual HTTP/2 stream state for gRPC requests. + + Each gRPC call is handled as a separate HTTP/2 stream. This module: + - Decodes HTTP/2 headers into gRPC metadata + - Accumulates DATA frames into gRPC messages + - Handles 5-byte length-prefixed message framing + - Manages stream lifecycle (HEADERS -> DATA -> trailers) + """ + + alias GRPC.HTTP2.Frame + + @type stream_id :: pos_integer() + @type state :: :idle | :open | :half_closed_local | :half_closed_remote | :closed + + @type t :: %__MODULE__{ + stream_id: stream_id(), + state: state(), + # gRPC request info + path: String.t() | nil, + method: String.t() | nil, + authority: String.t() | nil, + content_type: String.t() | nil, + metadata: map(), + # Deadline (absolute time in microseconds when request expires) + deadline: integer() | nil, + # Message buffering + data_buffer: binary(), + message_buffer: [map()], + # Flow control + window_size: integer(), + # Trailers + trailers: map(), + # Handler PID for streaming + handler_pid: pid() | nil, + # Custom headers/trailers set by handler + custom_headers: map(), + custom_trailers: map(), + # Flag to track if response headers were sent + headers_sent: boolean(), + # Bidirectional streaming flag + is_bidi_streaming: boolean(), + # Flag to track if handler was already started (for bidi) + handler_started: boolean(), + # PID of the BidiStream task (for bidi streaming only) + bidi_stream_pid: pid() | nil, + # Codec, compressor, and RPC info for decoding subsequent messages + codec: module() | nil, + compressor: module() | nil, + rpc: tuple() | nil, + # Flag to track if client sent END_STREAM (stream half-closed remote) + end_stream_received: boolean(), + # Flag to track if we already sent an error response (prevent duplicates) + error_sent: boolean() + } + + defstruct stream_id: nil, + state: :idle, + path: nil, + 
method: nil, + authority: nil, + content_type: nil, + metadata: %{}, + data_buffer: <<>>, + message_buffer: [], + window_size: 65_535, + trailers: %{}, + is_bidi_streaming: false, + handler_started: false, + bidi_stream_pid: nil, + handler_pid: nil, + custom_headers: %{}, + custom_trailers: %{}, + headers_sent: false, + codec: nil, + compressor: nil, + rpc: nil, + deadline: nil, + end_stream_received: false, + error_sent: false + + # Parses grpc-timeout header and converts to absolute deadline + # Format: "1H" (hour), "1M" (minute), "1S" (second), "1m" (millisecond), "1u" (microsecond), "1n" (nanosecond) + defp parse_timeout_to_deadline(nil), do: nil + + defp parse_timeout_to_deadline(timeout_str) do + case Integer.parse(timeout_str) do + {value, "H"} -> System.monotonic_time(:microsecond) + value * 3_600_000_000 + {value, "M"} -> System.monotonic_time(:microsecond) + value * 60_000_000 + {value, "S"} -> System.monotonic_time(:microsecond) + value * 1_000_000 + {value, "m"} -> System.monotonic_time(:microsecond) + value * 1_000 + {value, "u"} -> System.monotonic_time(:microsecond) + value + {value, "n"} -> System.monotonic_time(:microsecond) + div(value, 1_000) + _ -> nil + end + end + + @doc """ + Creates a new stream state. + """ + @spec new(stream_id(), integer()) :: t() + def new(stream_id, initial_window_size \\ 65_535) do + %__MODULE__{ + stream_id: stream_id, + state: :idle, + window_size: initial_window_size + } + end + + @doc """ + Creates a stream state from decoded HTTP/2 headers. + Extracts pseudo-headers and metadata from header list. 
+ """ + @spec from_headers(stream_id(), list({String.t(), String.t()}), integer()) :: t() + def from_headers(stream_id, headers, initial_window_size \\ 65_535) do + stream = new(stream_id, initial_window_size) + + {path, method, authority, content_type, timeout_str, metadata} = + Enum.reduce(headers, {nil, nil, nil, nil, nil, %{}}, fn + {":path", value}, {_, m, a, ct, t, meta} -> + {value, m, a, ct, t, meta} + + {":method", value}, {p, _, a, ct, t, meta} -> + {p, value, a, ct, t, meta} + + {":authority", value}, {p, m, _, ct, t, meta} -> + {p, m, value, ct, t, meta} + + {"content-type", value}, {p, m, a, _, t, meta} -> + {p, m, a, value, t, meta} + + {"grpc-timeout", value}, {p, m, a, ct, _, meta} -> + {p, m, a, ct, value, meta} + + # Skip other pseudo-headers + {":" <> _rest, _value}, acc -> + acc + + {key, value}, {p, m, a, ct, t, meta} -> + {p, m, a, ct, t, Map.put(meta, key, value)} + end) + + # Parse timeout and calculate deadline + deadline = parse_timeout_to_deadline(timeout_str) + + if timeout_str do + require Logger + + Logger.info( + "[StreamState] Parsed grpc-timeout: #{timeout_str} -> deadline: #{inspect(deadline)} (now: #{System.monotonic_time(:microsecond)})" + ) + end + + %{ + stream + | path: path, + method: method, + authority: authority, + content_type: content_type || "application/grpc+proto", + metadata: metadata, + deadline: deadline, + state: :open + } + end + + @doc """ + Checks if the stream's deadline has been exceeded. + Returns true if the deadline exists and has passed, false otherwise. + """ + @spec deadline_exceeded?(t()) :: boolean() + def deadline_exceeded?(%__MODULE__{deadline: nil}), do: false + + def deadline_exceeded?(%__MODULE__{deadline: deadline}) do + System.monotonic_time(:microsecond) > deadline + end + + @doc """ + Processes HEADERS frame and extracts gRPC metadata. 
+ + HTTP/2 pseudo-headers are decoded into gRPC request fields: + - `:method` → "POST" (gRPC always uses POST) + - `:path` → "/package.Service/Method" + - `:authority` → "host:port" + - `content-type` → "application/grpc+proto" etc + + Other headers become gRPC metadata. + """ + @spec handle_headers(t(), Frame.Headers.t()) :: {:ok, t()} | {:error, term()} + def handle_headers(stream, %Frame.Headers{} = headers) do + case stream.state do + :idle -> + decode_headers(stream, headers) + + :half_closed_remote -> + # Trailers received + decode_trailers(stream, headers) + + _other -> + {:error, :protocol_error} + end + end + + @doc """ + Processes DATA frame and accumulates gRPC messages. + + gRPC uses 5-byte length-prefixed framing: + - Byte 0: Compression flag (0 = no compression, 1 = compressed) + - Bytes 1-4: Message length (big-endian uint32) + - Bytes 5+: Message payload + + Multiple messages can arrive in a single DATA frame or be split across frames. + """ + @spec handle_data(t(), Frame.Data.t()) :: {:ok, t(), [map()]} | {:error, term()} + def handle_data(stream, %Frame.Data{} = data) do + case stream.state do + :open -> + process_data(stream, data) + + _other -> + {:error, :stream_closed} + end + end + + @doc """ + Updates stream state based on end_stream flag. + """ + @spec maybe_close_stream(t(), boolean()) :: t() + def maybe_close_stream(stream, end_stream) do + if end_stream do + new_state = + case stream.state do + :open -> :half_closed_remote + :half_closed_local -> :closed + other -> other + end + + %{stream | state: new_state} + else + stream + end + end + + @doc """ + Updates stream window size for flow control. 
+ """ + @spec update_window(t(), integer()) :: {:ok, t()} | {:error, :flow_control_error} + def update_window(stream, increment) do + case GRPC.HTTP2.FlowControl.update_window(stream.window_size, increment) do + {:ok, new_size} -> + {:ok, %{stream | window_size: new_size}} + + {:error, reason} -> + {:error, reason} + end + end + + @doc """ + Checks if stream has enough window to send data. + """ + @spec has_window?(t(), integer()) :: boolean() + def has_window?(stream, size) do + stream.window_size >= size + end + + ## Private Functions + + defp decode_headers(stream, headers) do + headers_map = + Enum.reduce(headers.headers, %{}, fn {name, value}, acc -> + Map.put(acc, name, value) + end) + + # Extract pseudo-headers + method = Map.get(headers_map, ":method") + path = Map.get(headers_map, ":path") + authority = Map.get(headers_map, ":authority") + content_type = Map.get(headers_map, "content-type") + + # Validate gRPC request + with :ok <- validate_method(method), + :ok <- validate_path(path), + :ok <- validate_content_type(content_type) do + # Extract metadata (non-pseudo headers) + metadata = + headers_map + |> Enum.filter(fn {name, _value} -> !String.starts_with?(name, ":") end) + |> Enum.into(%{}) + + stream = %{ + stream + | state: :open, + method: method, + path: path, + authority: authority, + content_type: content_type, + metadata: metadata + } + + stream = maybe_close_stream(stream, headers.end_stream) + + {:ok, stream} + else + {:error, reason} -> {:error, reason} + end + end + + defp decode_trailers(stream, headers) do + trailers = + Enum.reduce(headers.headers, %{}, fn {name, value}, acc -> + Map.put(acc, name, value) + end) + + stream = %{stream | trailers: trailers} + stream = maybe_close_stream(stream, headers.end_stream) + + {:ok, stream} + end + + defp validate_method("POST"), do: :ok + defp validate_method(_), do: {:error, :invalid_method} + + defp validate_path("/" <> _rest), do: :ok + defp validate_path(_), do: {:error, :invalid_path} + + defp 
validate_content_type("application/grpc" <> _), do: :ok
  defp validate_content_type(_), do: {:error, :invalid_content_type}

  # Appends a DATA frame payload to the stream buffer, extracts any
  # complete length-prefixed messages, and applies the end_stream
  # transition. Returns {:ok, stream, newly_extracted_messages}.
  defp process_data(stream, data) do
    # Append to buffer
    buffer = stream.data_buffer <> data.data

    # Extract complete messages
    {messages, remaining} = extract_messages(buffer, [])

    stream = %{
      stream
      | data_buffer: remaining,
        message_buffer: stream.message_buffer ++ messages
    }

    stream = maybe_close_stream(stream, data.end_stream)

    {:ok, stream, messages}
  end

  # Extract 5-byte length-prefixed messages (1-byte compression flag,
  # 4-byte big-endian length, payload). Accumulates in reverse with O(1)
  # prepends and reverses once at the end (the original `acc ++ [msg]`
  # was O(n^2) across many messages).
  # NOTE(review): this bitstring pattern was reconstructed — the original
  # literal was lost in the patch artifact ("<>"); format per gRPC
  # PROTOCOL-HTTP2 framing.
  defp extract_messages(
         <<compressed::8, length::32, payload::binary-size(length), rest::binary>>,
         acc
       ) do
    message = %{compressed: compressed == 1, data: payload}
    extract_messages(rest, [message | acc])
  end

  defp extract_messages(buffer, acc) do
    # Not enough data for a complete message; keep the remainder buffered.
    {Enum.reverse(acc), buffer}
  end
end diff --git a/grpc_server/lib/grpc/server/stream.ex b/grpc_server/lib/grpc/server/stream.ex index fa0a7c195..d0766e0a4 100644 --- a/grpc_server/lib/grpc/server/stream.ex +++ b/grpc_server/lib/grpc/server/stream.ex @@ -98,10 +98,8 @@ defmodule GRPC.Server.Stream do data, opts ) do - opts = - opts - |> Keyword.put(:codec, codec) - |> Keyword.put(:http_transcode, access_mode == :http_transcoding) + # Optimize opts construction - avoid multiple Keyword operations + opts = [{:codec, codec}, {:http_transcode, access_mode == :http_transcoding} | opts] adapter.send_reply(stream.payload, data, opts) diff --git a/grpc_server/mix.exs b/grpc_server/mix.exs index f34e572a4..fa4143acf 100644 --- a/grpc_server/mix.exs +++ b/grpc_server/mix.exs @@ -38,6 +38,8 @@ defmodule GRPC.Server.MixProject do {:protobuf, "~> 0.14"}, {:cowboy, "~> 2.14"}, {:cowlib, "~> 2.14"}, + {:thousand_island, "~> 1.4"}, + {:hpax, "~> 1.0"}, {:flow, "~> 1.2"}, {:protobuf_generate, "~> 0.1.3", only: [:dev, :test]}, {:ex_parameterized, "~> 1.3.7", only: :test}, diff --git a/grpc_server/mix.lock b/grpc_server/mix.lock index 9431acb94..503ff9108 100644 --- a/grpc_server/mix.lock +++
b/grpc_server/mix.lock @@ -24,4 +24,5 @@ "ranch": {:hex, :ranch, "2.2.0", "25528f82bc8d7c6152c57666ca99ec716510fe0925cb188172f41ce93117b1b0", [:make, :rebar3], [], "hexpm", "fa0b99a1780c80218a4197a59ea8d3bdae32fbff7e88527d7d8a4787eff4f8e7"}, "rustler_precompiled": {:hex, :rustler_precompiled, "0.8.3", "4e741024b0b097fe783add06e53ae9a6f23ddc78df1010f215df0c02915ef5a8", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: false]}, {:rustler, "~> 0.23", [hex: :rustler, repo: "hexpm", optional: true]}], "hexpm", "c23f5f33cb6608542de4d04faf0f0291458c352a4648e4d28d17ee1098cddcc4"}, "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, + "thousand_island": {:hex, :thousand_island, "1.4.2", "735fa783005d1703359bbd2d3a5a3a398075ba4456e5afe3c5b7cf4666303d36", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "1c7637f16558fc1c35746d5ee0e83b18b8e59e18d28affd1f2fa1645f8bc7473"}, } diff --git a/grpc_server/src/grpc_stream_h.erl b/grpc_server/src/grpc_stream_h.erl index 42e2c0525..20f576645 100644 --- a/grpc_server/src/grpc_stream_h.erl +++ b/grpc_server/src/grpc_stream_h.erl @@ -67,7 +67,9 @@ expect(Req) -> %% Stream isn't waiting for data. data(StreamID, IsFin, Data, State=#state{ read_body_ref=undefined, read_body_buffer=Buffer, body_length=BodyLen}) -> - Commands = case byte_size(Data) of + % Optimization: calculate byte_size once and reuse + DataSize = byte_size(Data), + Commands = case DataSize of 0 -> []; Size -> @@ -77,14 +79,16 @@ data(StreamID, IsFin, Data, State=#state{ expect=undefined, read_body_is_fin=IsFin, read_body_buffer= << Buffer/binary, Data/binary >>, - body_length=BodyLen + byte_size(Data) + body_length=BodyLen + DataSize }); %% Stream is waiting for data using auto mode. 
% GRPC: We don't pass auto, but treat it as auto data(StreamID, IsFin, Data, State=#state{read_body_pid=Pid, read_body_ref=Ref, body_length=BodyLen}) -> send_request_body(Pid, Ref, IsFin, BodyLen, Data), - Commands = case byte_size(Data) of + % Optimization: calculate byte_size once and reuse + DataSize = byte_size(Data), + Commands = case DataSize of 0 -> []; Size -> diff --git a/grpc_server/test/grpc/server/adapters/thousand_island_test.exs b/grpc_server/test/grpc/server/adapters/thousand_island_test.exs new file mode 100644 index 000000000..ce331db4f --- /dev/null +++ b/grpc_server/test/grpc/server/adapters/thousand_island_test.exs @@ -0,0 +1,70 @@ +defmodule GRPC.Server.Adapters.ThousandIslandTest do + use ExUnit.Case, async: true + + alias GRPC.Server.Adapters.ThousandIsland, as: Adapter + + describe "child_spec/2" do + test "returns valid child spec" do + spec = Adapter.child_spec(:test_endpoint, [], 50051, []) + + # The spec should be a supervisor child spec that wraps ThousandIsland + Task.Supervisor + assert is_map(spec) + assert spec.type == :supervisor + assert {Supervisor, :start_link, [children, _opts]} = spec.start + assert is_list(children) + + # Should have Task.Supervisor and ThousandIsland children + assert length(children) == 2 + [task_sup, ti_child] = children + + # First child is Task.Supervisor + assert {Task.Supervisor, [name: GRPC.Server.StreamTaskSupervisor]} = task_sup + + # Second child is ThousandIsland + assert ti_child.id == :thousand_island + assert {ThousandIsland, :start_link, [args]} = ti_child.start + assert is_list(args) + assert args[:port] == 50051 + end + + test "includes adapter options in child spec" do + opts = [num_acceptors: 5, num_connections: 50] + spec = Adapter.child_spec(:test_endpoint, [], 50051, opts) + + {Supervisor, :start_link, [children, _opts]} = spec.start + [_task_sup, ti_child] = children + {ThousandIsland, :start_link, [args]} = ti_child.start + + assert args[:num_acceptors] == 5 + assert 
args[:num_connections] == 50 + end + end + + describe "start/4" do + test "can start and stop server" do + {:ok, pid, port} = Adapter.start(:test_server_unique_a, [], 0, []) + + assert is_pid(pid) + assert is_integer(port) + # Note: Port 0 means "choose any available port" + # ThousandIsland returns the actual assigned port + assert Process.alive?(pid) + + # Stop server + Supervisor.stop(pid) + refute Process.alive?(pid) + end + + test "accepts custom options" do + opts = [ + num_acceptors: 2, + num_connections: 10 + ] + + {:ok, pid, _port} = Adapter.start(:test_server_unique_b, [], 0, opts) + assert Process.alive?(pid) + + Supervisor.stop(pid) + end + end +end diff --git a/grpc_server/test/grpc/server/http2/connection_test.exs b/grpc_server/test/grpc/server/http2/connection_test.exs new file mode 100644 index 000000000..1cd3a4b95 --- /dev/null +++ b/grpc_server/test/grpc/server/http2/connection_test.exs @@ -0,0 +1,299 @@ +defmodule GRPC.Server.HTTP2.ConnectionTest do + use ExUnit.Case, async: true + + alias GRPC.Server.HTTP2.Connection + alias GRPC.HTTP2.{Frame, Settings, Errors} + + # For now, we'll test without mocking the socket + # Just test the logic of handle_frame functions + + describe "handle_frame/3 - SETTINGS" do + test "applies SETTINGS and updates remote settings" do + # Create a minimal connection struct + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{}, + send_hpack_state: HPAX.new(4096), + recv_hpack_state: HPAX.new(4096) + } + + settings_frame = %Frame.Settings{ + ack: false, + settings: [header_table_size: 8192, max_frame_size: 32_768] + } + + # Mock socket - we won't actually call send + socket = nil + + new_connection = Connection.handle_frame(settings_frame, socket, connection) + + # Should have updated remote settings + assert new_connection.remote_settings.header_table_size == 8192 + assert new_connection.remote_settings.max_frame_size == 32_768 + end + + test "ignores SETTINGS ACK frames" do + 
connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{} + } + + ack_frame = %Frame.Settings{ack: true, settings: []} + socket = nil + + new_connection = Connection.handle_frame(ack_frame, socket, connection) + + # Connection should be unchanged (except socket interaction) + assert new_connection.remote_settings == connection.remote_settings + end + + test "raises error for invalid initial window size" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{} + } + + invalid_frame = %Frame.Settings{ + ack: false, + settings: [initial_window_size: 3_000_000_000] + } + + socket = nil + + assert_raise Errors.ConnectionError, fn -> + Connection.handle_frame(invalid_frame, socket, connection) + end + end + + test "raises error for invalid max frame size (too small)" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{} + } + + invalid_frame = %Frame.Settings{ + ack: false, + settings: [max_frame_size: 1000] + } + + socket = nil + + assert_raise Errors.ConnectionError, fn -> + Connection.handle_frame(invalid_frame, socket, connection) + end + end + + test "raises error for invalid max frame size (too large)" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{} + } + + invalid_frame = %Frame.Settings{ + ack: false, + settings: [max_frame_size: 20_000_000] + } + + socket = nil + + assert_raise Errors.ConnectionError, fn -> + Connection.handle_frame(invalid_frame, socket, connection) + end + end + + test "updates HPACK table size when header_table_size changes" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{header_table_size: 4096}, + send_hpack_state: HPAX.new(4096), + recv_hpack_state: HPAX.new(4096) + } + + settings_frame = %Frame.Settings{ + ack: false, + settings: [header_table_size: 8192] + } + + socket = nil + + new_connection = Connection.handle_frame(settings_frame, socket, 
connection) + + # HPACK state should be resized + assert new_connection.remote_settings.header_table_size == 8192 + end + end + + describe "handle_frame/3 - PING" do + test "connection unchanged after PING (ACK sent via socket)" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{} + } + + ping_frame = %Frame.Ping{ack: false, payload: <<1, 2, 3, 4, 5, 6, 7, 8>>} + socket = nil + + new_connection = Connection.handle_frame(ping_frame, socket, connection) + + # Connection state should be unchanged (response sent via socket) + assert new_connection == connection + end + + test "ignores PING ACK frames" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{} + } + + pong_frame = %Frame.Ping{ack: true, payload: <<1, 2, 3, 4, 5, 6, 7, 8>>} + socket = nil + + new_connection = Connection.handle_frame(pong_frame, socket, connection) + + assert new_connection == connection + end + end + + describe "handle_frame/3 - WINDOW_UPDATE" do + test "updates connection send window" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{}, + send_window_size: 65_535 + } + + window_update = %Frame.WindowUpdate{stream_id: 0, size_increment: 1000} + socket = nil + + new_connection = Connection.handle_frame(window_update, socket, connection) + + assert new_connection.send_window_size == 66_535 + end + + test "raises error on window overflow" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{}, + send_window_size: 2_147_483_647 + } + + window_update = %Frame.WindowUpdate{stream_id: 0, size_increment: 1000} + socket = nil + + assert_raise Errors.ConnectionError, fn -> + Connection.handle_frame(window_update, socket, connection) + end + end + end + + describe "handle_frame/3 - CONTINUATION" do + test "accumulates CONTINUATION frames until end_headers" do + headers_frame = %Frame.Headers{ + stream_id: 1, + end_stream: false, + end_headers: 
false, + fragment: <<1, 2, 3>> + } + + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{}, + fragment_frame: headers_frame + } + + cont1 = %Frame.Continuation{ + stream_id: 1, + end_headers: false, + fragment: <<4, 5, 6>> + } + + socket = nil + + new_connection = Connection.handle_frame(cont1, socket, connection) + + # Should have accumulated fragment + assert new_connection.fragment_frame != nil + assert new_connection.fragment_frame.fragment == <<1, 2, 3, 4, 5, 6>> + end + + test "raises error if non-CONTINUATION frame while expecting CONTINUATION" do + headers_frame = %Frame.Headers{ + stream_id: 1, + end_stream: false, + end_headers: false, + fragment: <<1, 2, 3>> + } + + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{}, + fragment_frame: headers_frame + } + + # Send DATA frame instead of CONTINUATION + data_frame = %Frame.Data{stream_id: 1, end_stream: false, data: <<>>} + socket = nil + + assert_raise Errors.ConnectionError, fn -> + Connection.handle_frame(data_frame, socket, connection) + end + end + end + + describe "handle_frame/3 - unsupported frames" do + test "raises error for PUSH_PROMISE (not supported in gRPC)" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{} + } + + push_frame = %Frame.PushPromise{ + stream_id: 1, + promised_stream_id: 2, + end_headers: true, + fragment: <<>> + } + + socket = nil + + assert_raise Errors.ConnectionError, fn -> + Connection.handle_frame(push_frame, socket, connection) + end + end + + test "ignores PRIORITY frames (gRPC doesn't use priorities)" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{} + } + + priority_frame = %Frame.Priority{ + stream_id: 1, + exclusive_dependency: false, + stream_dependency: 0, + weight: 16 + } + + socket = nil + + new_connection = Connection.handle_frame(priority_frame, socket, connection) + + # Should be unchanged + assert 
new_connection == connection + end + + test "ignores UNKNOWN frames" do + connection = %Connection{ + local_settings: %Settings{}, + remote_settings: %Settings{} + } + + unknown_frame = %Frame.Unknown{type: 255, flags: 0, stream_id: 0, payload: <<>>} + socket = nil + + new_connection = Connection.handle_frame(unknown_frame, socket, connection) + + assert new_connection == connection + end + end +end diff --git a/grpc_server/test/grpc/server/http2/errors_test.exs b/grpc_server/test/grpc/server/http2/errors_test.exs new file mode 100644 index 000000000..208e7242e --- /dev/null +++ b/grpc_server/test/grpc/server/http2/errors_test.exs @@ -0,0 +1,79 @@ +defmodule GRPC.HTTP2.ErrorsTest do + use ExUnit.Case, async: true + + alias GRPC.HTTP2.Errors + + describe "error codes" do + test "no_error returns 0x0" do + assert Errors.no_error() == 0x0 + end + + test "protocol_error returns 0x1" do + assert Errors.protocol_error() == 0x1 + end + + test "internal_error returns 0x2" do + assert Errors.internal_error() == 0x2 + end + + test "flow_control_error returns 0x3" do + assert Errors.flow_control_error() == 0x3 + end + + test "settings_timeout returns 0x4" do + assert Errors.settings_timeout() == 0x4 + end + + test "stream_closed returns 0x5" do + assert Errors.stream_closed() == 0x5 + end + + test "frame_size_error returns 0x6" do + assert Errors.frame_size_error() == 0x6 + end + + test "refused_stream returns 0x7" do + assert Errors.refused_stream() == 0x7 + end + + test "cancel returns 0x8" do + assert Errors.cancel() == 0x8 + end + + test "compression_error returns 0x9" do + assert Errors.compression_error() == 0x9 + end + + test "connect_error returns 0xA" do + assert Errors.connect_error() == 0xA + end + + test "enhance_your_calm returns 0xB" do + assert Errors.enhance_your_calm() == 0xB + end + + test "inadequate_security returns 0xC" do + assert Errors.inadequate_security() == 0xC + end + + test "http_1_1_requires returns 0xD" do + assert Errors.http_1_1_requires() == 
0xD + end + end + + describe "ConnectionError" do + test "can be raised with message and error code" do + assert_raise Errors.ConnectionError, "test message", fn -> + raise Errors.ConnectionError, message: "test message", error_code: 0x1 + end + end + end + + describe "StreamError" do + test "can be raised with message, error code and stream_id" do + assert_raise Errors.StreamError, "test message", fn -> + raise Errors.StreamError, message: "test message", error_code: 0x1, stream_id: 1 + end + end + end +end diff --git a/grpc_server/test/grpc/server/http2/flow_control_test.exs b/grpc_server/test/grpc/server/http2/flow_control_test.exs new file mode 100644 index 000000000..6b4fa997a --- /dev/null +++ b/grpc_server/test/grpc/server/http2/flow_control_test.exs @@ -0,0 +1,80 @@ +defmodule GRPC.HTTP2.FlowControlTest do + use ExUnit.Case, async: true + + alias GRPC.HTTP2.FlowControl + + import Bitwise + + @max_window_size (1 <<< 31) - 1 + @min_window_size 1 <<< 30 + + describe "compute_recv_window/2" do + test "returns no increment when window is still large" do + # Window is still above minimum, no need to update yet + recv_window_size = @min_window_size + 1000 + data_size = 500 + + assert {new_window, 0} = FlowControl.compute_recv_window(recv_window_size, data_size) + assert new_window == recv_window_size - data_size + end + + test "returns increment when window falls below minimum" do + # Window falls below minimum, need to send WINDOW_UPDATE + recv_window_size = @min_window_size + 100 + data_size = 200 + + assert {new_window, increment} = + FlowControl.compute_recv_window(recv_window_size, data_size) + + assert new_window > recv_window_size - data_size + assert increment > 0 + end + + test "respects maximum window size" do + # Even when requesting update, should not exceed max window size + recv_window_size = @min_window_size - 1000 + data_size = 100 + + assert {new_window, increment} = + FlowControl.compute_recv_window(recv_window_size, data_size) + + assert 
new_window <= @max_window_size + assert increment > 0 + end + + test "handles small recv_window_size" do + recv_window_size = 1000 + data_size = 500 + + assert {new_window, increment} = + FlowControl.compute_recv_window(recv_window_size, data_size) + + assert new_window > recv_window_size - data_size + assert increment > 0 + end + + test "handles edge case when recv_window equals min_window_size" do + recv_window_size = @min_window_size + data_size = 1 + + assert {new_window, increment} = + FlowControl.compute_recv_window(recv_window_size, data_size) + + assert new_window > recv_window_size - data_size + assert increment > 0 + end + + test "handles large data size" do + recv_window_size = @max_window_size + data_size = @min_window_size + 1000 + + assert {new_window, increment} = + FlowControl.compute_recv_window(recv_window_size, data_size) + + # Window should still be positive after large data consumption + assert new_window >= 0 + # May or may not need increment depending on resulting window size + assert is_integer(increment) + end + end +end diff --git a/grpc_server/test/grpc/server/http2/frame_test.exs b/grpc_server/test/grpc/server/http2/frame_test.exs new file mode 100644 index 000000000..a2b61df77 --- /dev/null +++ b/grpc_server/test/grpc/server/http2/frame_test.exs @@ -0,0 +1,260 @@ +defmodule GRPC.HTTP2.FrameTest do + use ExUnit.Case, async: true + + alias GRPC.HTTP2.{Frame, Errors} + + describe "frame deserialization" do + test "deserializes DATA frames" do + # DATA frame: stream_id=1, no padding, end_stream=false + frame = <<0, 0, 3, 0, 0, 0, 0, 0, 1, 1, 2, 3>> + + assert {{:ok, %Frame.Data{stream_id: 1, end_stream: false, data: <<1, 2, 3>>}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes HEADERS frames" do + # HEADERS frame: stream_id=1, end_stream=false, end_headers=false + frame = <<0, 0, 3, 1, 0x00, 0, 0, 0, 1, 1, 2, 3>> + + assert {{:ok, + %Frame.Headers{ + stream_id: 1, + end_stream: false, + end_headers: false, + 
fragment: <<1, 2, 3>> + }}, <<>>} = Frame.deserialize(frame, 16_384) + end + + test "deserializes SETTINGS frames" do + # SETTINGS frame: max_frame_size=32768 + frame = <<0, 0, 6, 4, 0, 0, 0, 0, 0, 0, 5, 0, 0, 128, 0>> + + assert {{:ok, %Frame.Settings{ack: false, settings: %{max_frame_size: 32_768}}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes SETTINGS ACK frames" do + frame = <<0, 0, 0, 4, 1, 0, 0, 0, 0>> + + assert {{:ok, %Frame.Settings{ack: true}}, <<>>} = Frame.deserialize(frame, 16_384) + end + + test "deserializes PING frames" do + frame = <<0, 0, 8, 6, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8>> + + assert {{:ok, %Frame.Ping{ack: false, payload: <<1, 2, 3, 4, 5, 6, 7, 8>>}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes PING ACK frames" do + frame = <<0, 0, 8, 6, 1, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8>> + + assert {{:ok, %Frame.Ping{ack: true, payload: <<1, 2, 3, 4, 5, 6, 7, 8>>}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes GOAWAY frames" do + frame = <<0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2>> + + assert {{:ok, %Frame.Goaway{last_stream_id: 1, error_code: 2, debug_data: <<>>}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes RST_STREAM frames" do + frame = <<0, 0, 4, 3, 0, 0, 0, 0, 1, 0, 0, 0, 8>> + + assert {{:ok, %Frame.RstStream{stream_id: 1, error_code: 8}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes WINDOW_UPDATE frames" do + frame = <<0, 0, 4, 8, 0, 0, 0, 0, 0, 0, 0, 0, 100>> + + assert {{:ok, %Frame.WindowUpdate{stream_id: 0, size_increment: 100}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes CONTINUATION frames" do + frame = <<0, 0, 3, 9, 0x00, 0, 0, 0, 1, 1, 2, 3>> + + assert {{:ok, + %Frame.Continuation{ + stream_id: 1, + end_headers: false, + fragment: <<1, 2, 3>> + }}, <<>>} = Frame.deserialize(frame, 16_384) + end + + test "deserializes PRIORITY frames" do + frame = <<0, 0, 5, 
2, 0, 0, 0, 0, 1, 0::1, 12::31, 34>> + + assert {{:ok, + %Frame.Priority{ + stream_id: 1, + exclusive_dependency: false, + stream_dependency: 12, + weight: 34 + }}, <<>>} = Frame.deserialize(frame, 16_384) + end + + test "deserializes unknown frame types" do + # Unknown type 0xFF + frame = <<0, 0, 3, 0xFF, 0, 0, 0, 0, 1, 1, 2, 3>> + + assert {{:ok, + %Frame.Unknown{ + type: 0xFF, + flags: 0, + stream_id: 1, + payload: <<1, 2, 3>> + }}, <<>>} = Frame.deserialize(frame, 16_384) + end + + test "returns extra data when frame is followed by more data" do + frame = <<0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 3>> + + assert {{:ok, %Frame.Data{stream_id: 1, data: <<>>}}, <<1, 2, 3>>} = + Frame.deserialize(frame, 16_384) + end + + test "asks for more data when frame is incomplete" do + frame = <<0, 0, 10, 0, 0, 0, 0, 0, 1>> + + assert {{:more, <<0, 0, 10, 0, 0, 0, 0, 0, 1>>}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "returns nil when buffer is empty" do + assert Frame.deserialize(<<>>, 16_384) == nil + end + + test "rejects frames that exceed max_frame_size" do + # Frame with length 100, max_frame_size 50 + frame = <<0, 0, 100, 0, 0, 0, 0, 0, 1>> <> :binary.copy(<<0>>, 100) + + assert {{:error, error_code, "Payload size too large (RFC9113§4.2)"}, _rest} = + Frame.deserialize(frame, 50) + + assert error_code == Errors.frame_size_error() + end + end + + describe "frame serialization" do + test "serializes DATA frames" do + frame = %Frame.Data{ + stream_id: 123, + end_stream: false, + data: <<1, 2, 3>> + } + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 3, 0, 0, 0, 0, 0, 123>>, <<1, 2, 3>>] + ] + end + + test "serializes DATA frames with end_stream set" do + frame = %Frame.Data{ + stream_id: 123, + end_stream: true, + data: <<1, 2, 3>> + } + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 3, 0, 1, 0, 0, 0, 123>>, <<1, 2, 3>>] + ] + end + + test "serializes HEADERS frames" do + frame = %Frame.Headers{ + stream_id: 123, + end_stream: false, + fragment: 
<<1, 2, 3>> + } + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 3, 1, 4, 0, 0, 0, 123>>, <<1, 2, 3>>] + ] + end + + test "serializes SETTINGS frames" do + frame = %Frame.Settings{ + ack: false, + settings: %{ + header_table_size: 8_192, + max_frame_size: 32_768 + } + } + + result = Frame.serialize(frame, 16_384) + assert [[header, payload]] = result + assert <<0, 0, 12, 4, 0, 0, 0, 0, 0>> = header + # Payload should contain both settings + payload_binary = IO.iodata_to_binary(payload) + assert byte_size(payload_binary) == 12 + end + + test "serializes SETTINGS ACK frames" do + frame = %Frame.Settings{ack: true, settings: %{}} + + assert Frame.serialize(frame, 16_384) == [[<<0, 0, 0, 4, 1, 0, 0, 0, 0>>, <<>>]] + end + + test "serializes PING frames" do + frame = %Frame.Ping{ack: false, payload: <<1, 2, 3, 4, 5, 6, 7, 8>>} + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 8, 6, 0, 0, 0, 0, 0>>, <<1, 2, 3, 4, 5, 6, 7, 8>>] + ] + end + + test "serializes GOAWAY frames" do + frame = %Frame.Goaway{last_stream_id: 1, error_code: 2, debug_data: <<>>} + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 8, 7, 0, 0, 0, 0, 0>>, <<0, 0, 0, 1, 0, 0, 0, 2>>] + ] + end + + test "serializes RST_STREAM frames" do + frame = %Frame.RstStream{stream_id: 1, error_code: 8} + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 4, 3, 0, 0, 0, 0, 1>>, <<0, 0, 0, 8>>] + ] + end + + test "serializes WINDOW_UPDATE frames" do + frame = %Frame.WindowUpdate{stream_id: 123, size_increment: 234} + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 4, 8, 0, 0, 0, 0, 123>>, <<0, 0, 0, 234>>] + ] + end + + test "splits DATA frames that exceed max_frame_size" do + frame = %Frame.Data{ + stream_id: 123, + end_stream: false, + data: <<1, 2, 3>> + } + + assert Frame.serialize(frame, 2) == [ + [<<0, 0, 2, 0, 0, 0, 0, 0, 123>>, <<1, 2>>], + [<<0, 0, 1, 0, 0, 0, 0, 0, 123>>, <<3>>] + ] + end + + test "splits HEADERS frames into HEADERS + CONTINUATION" do + frame = 
%Frame.Headers{ + stream_id: 123, + end_stream: false, + fragment: <<1, 2, 3>> + } + + assert Frame.serialize(frame, 2) == [ + [<<0, 0, 2, 1, 0, 0, 0, 0, 123>>, <<1, 2>>], + [<<0, 0, 1, 9, 4, 0, 0, 0, 123>>, <<3>>] + ] + end + end +end diff --git a/grpc_server/test/grpc/server/http2/frame_test.exs.bak b/grpc_server/test/grpc/server/http2/frame_test.exs.bak new file mode 100644 index 000000000..2060b54dd --- /dev/null +++ b/grpc_server/test/grpc/server/http2/frame_test.exs.bak @@ -0,0 +1,259 @@ +defmodule GRPC.Server.HTTP2.FrameTest do + use ExUnit.Case, async: true + + alias GRPC.Server.HTTP2.{Frame, Errors} + + describe "frame deserialization" do + test "deserializes DATA frames" do + # DATA frame: stream_id=1, no padding, end_stream=false + frame = <<0, 0, 3, 0, 0, 0, 0, 0, 1, 1, 2, 3>> + + assert {{:ok, %Frame.Data{stream_id: 1, end_stream: false, data: <<1, 2, 3>>}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes HEADERS frames" do + # HEADERS frame: stream_id=1, end_stream=false, end_headers=false + frame = <<0, 0, 3, 1, 0x00, 0, 0, 0, 1, 1, 2, 3>> + + assert {{:ok, + %Frame.Headers{ + stream_id: 1, + end_stream: false, + end_headers: false, + fragment: <<1, 2, 3>> + }}, <<>>} = Frame.deserialize(frame, 16_384) + end + + test "deserializes SETTINGS frames" do + # SETTINGS frame: max_frame_size=32768 + frame = <<0, 0, 6, 4, 0, 0, 0, 0, 0, 0, 5, 0, 0, 128, 0>> + + assert {{:ok, %Frame.Settings{ack: false, settings: %{max_frame_size: 32_768}}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes SETTINGS ACK frames" do + frame = <<0, 0, 0, 4, 1, 0, 0, 0, 0>> + + assert {{:ok, %Frame.Settings{ack: true}}, <<>>} = Frame.deserialize(frame, 16_384) + end + + test "deserializes PING frames" do + frame = <<0, 0, 8, 6, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8>> + + assert {{:ok, %Frame.Ping{ack: false, payload: <<1, 2, 3, 4, 5, 6, 7, 8>>}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes PING ACK frames" 
do + frame = <<0, 0, 8, 6, 1, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8>> + + assert {{:ok, %Frame.Ping{ack: true, payload: <<1, 2, 3, 4, 5, 6, 7, 8>>}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes GOAWAY frames" do + frame = <<0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2>> + + assert {{:ok, %Frame.Goaway{last_stream_id: 1, error_code: 2, debug_data: <<>>}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes RST_STREAM frames" do + frame = <<0, 0, 4, 3, 0, 0, 0, 0, 1, 0, 0, 0, 8>> + + assert {{:ok, %Frame.RstStream{stream_id: 1, error_code: 8}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes WINDOW_UPDATE frames" do + frame = <<0, 0, 4, 8, 0, 0, 0, 0, 0, 0, 0, 0, 100>> + + assert {{:ok, %Frame.WindowUpdate{stream_id: 0, size_increment: 100}}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "deserializes CONTINUATION frames" do + frame = <<0, 0, 3, 9, 0x00, 0, 0, 0, 1, 1, 2, 3>> + + assert {{:ok, + %Frame.Continuation{ + stream_id: 1, + end_headers: false, + fragment: <<1, 2, 3>> + }}, <<>>} = Frame.deserialize(frame, 16_384) + end + + test "deserializes PRIORITY frames" do + frame = <<0, 0, 5, 2, 0, 0, 0, 0, 1, 0::1, 12::31, 34>> + + assert {{:ok, + %Frame.Priority{ + stream_id: 1, + exclusive_dependency: false, + stream_dependency: 12, + weight: 34 + }}, <<>>} = Frame.deserialize(frame, 16_384) + end + + test "deserializes unknown frame types" do + # Unknown type 0xFF + frame = <<0, 0, 3, 0xFF, 0, 0, 0, 0, 1, 1, 2, 3>> + + assert {{:ok, + %Frame.Unknown{ + type: 0xFF, + flags: 0, + stream_id: 1, + payload: <<1, 2, 3>> + }}, <<>>} = Frame.deserialize(frame, 16_384) + end + + test "returns extra data when frame is followed by more data" do + frame = <<0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 3>> + + assert {{:ok, %Frame.Data{stream_id: 1, data: <<>>}}, <<1, 2, 3>>} = + Frame.deserialize(frame, 16_384) + end + + test "asks for more data when frame is incomplete" do + frame = <<0, 0, 10, 0, 0, 
0, 0, 0, 1>> + + assert {{:more, <<0, 0, 10, 0, 0, 0, 0, 0, 1>>}, <<>>} = + Frame.deserialize(frame, 16_384) + end + + test "returns nil when buffer is empty" do + assert Frame.deserialize(<<>>, 16_384) == nil + end + + test "rejects frames that exceed max_frame_size" do + # Frame with length 100, max_frame_size 50 + frame = <<0, 0, 100, 0, 0, 0, 0, 0, 1>> <> :binary.copy(<<0>>, 100) + + assert {{:error, error_code, "Payload size too large (RFC9113§4.2)"}, _rest} = + Frame.deserialize(frame, 50) + + assert error_code == Errors.frame_size_error() + end + end + + describe "frame serialization" do + test "serializes DATA frames" do + frame = %Frame.Data{ + stream_id: 123, + end_stream: false, + data: <<1, 2, 3>> + } + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 3, 0, 0, 0, 0, 0, 123>>, <<1, 2, 3>>] + ] + end + + test "serializes DATA frames with end_stream set" do + frame = %Frame.Data{ + stream_id: 123, + end_stream: true, + data: <<1, 2, 3>> + } + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 3, 0, 1, 0, 0, 0, 123>>, <<1, 2, 3>>] + ] + end + + test "serializes HEADERS frames" do + frame = %Frame.Headers{ + stream_id: 123, + end_stream: false, + fragment: <<1, 2, 3>> + } + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 3, 1, 4, 0, 0, 0, 123>>, <<1, 2, 3>>] + ] + end + + test "serializes SETTINGS frames" do + frame = %Frame.Settings{ + ack: false, + settings: %{ + header_table_size: 8_192, + max_frame_size: 32_768 + } + } + + result = Frame.serialize(frame, 16_384) + assert [[header, payload]] = result + assert <<0, 0, 12, 4, 0, 0, 0, 0, 0>> = header + # Payload should contain both settings + assert byte_size(payload) == 12 + end + + test "serializes SETTINGS ACK frames" do + frame = %Frame.Settings{ack: true, settings: %{}} + + assert Frame.serialize(frame, 16_384) == [[<<0, 0, 0, 4, 1, 0, 0, 0, 0>>, <<>>]] + end + + test "serializes PING frames" do + frame = %Frame.Ping{ack: false, payload: <<1, 2, 3, 4, 5, 6, 7, 8>>} + + assert 
Frame.serialize(frame, 16_384) == [ + [<<0, 0, 8, 6, 0, 0, 0, 0, 0>>, <<1, 2, 3, 4, 5, 6, 7, 8>>] + ] + end + + test "serializes GOAWAY frames" do + frame = %Frame.Goaway{last_stream_id: 1, error_code: 2, debug_data: <<>>} + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 8, 7, 0, 0, 0, 0, 0>>, <<0, 0, 0, 1, 0, 0, 0, 2>>] + ] + end + + test "serializes RST_STREAM frames" do + frame = %Frame.RstStream{stream_id: 1, error_code: 8} + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 4, 3, 0, 0, 0, 0, 1>>, <<0, 0, 0, 8>>] + ] + end + + test "serializes WINDOW_UPDATE frames" do + frame = %Frame.WindowUpdate{stream_id: 123, size_increment: 234} + + assert Frame.serialize(frame, 16_384) == [ + [<<0, 0, 4, 8, 0, 0, 0, 0, 123>>, <<0, 0, 0, 234>>] + ] + end + + test "splits DATA frames that exceed max_frame_size" do + frame = %Frame.Data{ + stream_id: 123, + end_stream: false, + data: <<1, 2, 3>> + } + + assert Frame.serialize(frame, 2) == [ + [<<0, 0, 2, 0, 0, 0, 0, 0, 123>>, <<1, 2>>], + [<<0, 0, 1, 0, 1, 0, 0, 0, 123>>, <<3>>] + ] + end + + test "splits HEADERS frames into HEADERS + CONTINUATION" do + frame = %Frame.Headers{ + stream_id: 123, + end_stream: false, + fragment: <<1, 2, 3>> + } + + assert Frame.serialize(frame, 2) == [ + [<<0, 0, 2, 1, 0, 0, 0, 0, 123>>, <<1, 2>>], + [<<0, 0, 1, 9, 4, 0, 0, 0, 123>>, <<3>>] + ] + end + end +end diff --git a/grpc_server/test/grpc/server/http2/settings_test.exs b/grpc_server/test/grpc/server/http2/settings_test.exs new file mode 100644 index 000000000..8d25105aa --- /dev/null +++ b/grpc_server/test/grpc/server/http2/settings_test.exs @@ -0,0 +1,44 @@ +defmodule GRPC.HTTP2.SettingsTest do + use ExUnit.Case, async: true + + alias GRPC.HTTP2.Settings + + describe "default settings" do + test "has correct default values" do + settings = %Settings{} + + assert settings.header_table_size == 4_096 + assert settings.max_concurrent_streams == :infinity + assert settings.initial_window_size == 65_535 + assert 
settings.max_frame_size == 16_384 + assert settings.max_header_list_size == :infinity + end + end + + describe "settings modification" do + test "can update header_table_size" do + settings = %Settings{header_table_size: 8_192} + assert settings.header_table_size == 8_192 + end + + test "can update max_concurrent_streams" do + settings = %Settings{max_concurrent_streams: 100} + assert settings.max_concurrent_streams == 100 + end + + test "can update initial_window_size" do + settings = %Settings{initial_window_size: 32_768} + assert settings.initial_window_size == 32_768 + end + + test "can update max_frame_size" do + settings = %Settings{max_frame_size: 32_768} + assert settings.max_frame_size == 32_768 + end + + test "can update max_header_list_size" do + settings = %Settings{max_header_list_size: 16_384} + assert settings.max_header_list_size == 16_384 + end + end +end From cdd7294a40495aa9fcf0ad64f40e3a5e69ebf8f1 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 8 Dec 2025 15:27:06 -0300 Subject: [PATCH 03/47] feat: support thousand_island server adapter --- benchmark/bin/profile.exs | 87 +++++++ benchmark/bin/test.exs | 4 +- benchmark/config/config.exs | 3 +- benchmark/lib/benchmark/server_manager.ex | 27 +- benchmark/lib/grpc/worker_server.ex | 12 +- benchmark/lib/mix/tasks/benchmark.profile.ex | 105 ++++++++ benchmark/lib/mix/tasks/benchmark.test.ex | 41 +++- benchmark/lib/mix/tasks/benchmark.worker.ex | 10 +- benchmark/mix.lock | 1 + benchmark/profile_script.exs | 34 +++ benchmark/scripts/profile_server.exs | 70 ++++++ benchmark/scripts/quick_bench.sh | 20 ++ benchmark/scripts/run_optimized.sh | 15 ++ benchmark/test/benchmark_test.exs | 1 - grpc_client/mix.lock | 1 + .../thousand_island_adapter_test.exs | 230 ++++++++++++++++++ interop/config/config.exs | 2 +- interop/mix.lock | 1 + interop/script/quick_test.exs | 52 ++++ interop/script/run.exs | 54 +++- interop/script/test_custom_metadata.exs | 34 +++ interop/script/test_one.exs | 43 ++++ mix.exs | 
3 +- mix.lock | 1 + 24 files changed, 806 insertions(+), 45 deletions(-) create mode 100755 benchmark/bin/profile.exs create mode 100644 benchmark/lib/mix/tasks/benchmark.profile.ex create mode 100644 benchmark/profile_script.exs create mode 100644 benchmark/scripts/profile_server.exs create mode 100755 benchmark/scripts/quick_bench.sh create mode 100755 benchmark/scripts/run_optimized.sh create mode 100644 grpc_client/test/grpc/integration/thousand_island_adapter_test.exs create mode 100644 interop/script/quick_test.exs create mode 100644 interop/script/test_custom_metadata.exs create mode 100644 interop/script/test_one.exs diff --git a/benchmark/bin/profile.exs b/benchmark/bin/profile.exs new file mode 100755 index 000000000..45fa3fb61 --- /dev/null +++ b/benchmark/bin/profile.exs @@ -0,0 +1,87 @@ +#!/usr/bin/env elixir + +# Profile script to find bottlenecks in gRPC server +Mix.install([ + {:grpc, path: Path.expand("../..", __DIR__)}, + {:thousand_island, "~> 1.0"} +]) + +# Start the server +server_config = + Grpc.Testing.ServerConfig.new( + async_server_threads: 1, + port: 10000 + ) + +{:ok, _server_pid} = Benchmark.ServerManager.start_server(server_config) +Process.sleep(1000) + +# Connect client +{:ok, ch} = GRPC.Stub.connect("localhost:10000") + +payload_type = Grpc.Testing.PayloadType.value(:COMPRESSABLE) +req = Grpc.Testing.SimpleRequest.new( + response_type: payload_type, + response_size: 0, + payload: Grpc.Testing.Payload.new(type: payload_type, body: <<>>) +) + +# Warm up +IO.puts("Warming up...") +Enum.each(1..100, fn _ -> + Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) +end) + +IO.puts("\nStarting profiling with :fprof...") + +# Profile with fprof +:fprof.trace([:start, {:procs, :all}]) + +# Run workload +Enum.each(1..1000, fn _ -> + Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) +end) + +:fprof.trace(:stop) +:fprof.profile() +:fprof.analyse([:totals, {:sort, :acc}, {:dest, ~c"fprof_analysis.txt"}]) + +IO.puts("fprof analysis written 
to fprof_analysis.txt") + +# Now profile with eprof for better overview +IO.puts("\nStarting profiling with :eprof...") + +:eprof.start() +:eprof.start_profiling([self()]) + +Enum.each(1..1000, fn _ -> + Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) +end) + +:eprof.stop_profiling() +:eprof.analyze([:total, {:sort, :time}]) +:eprof.log(~c"eprof_analysis.txt") + +IO.puts("\neprof analysis written to eprof_analysis.txt") + +# Analyze specific modules +IO.puts("\nAnalyzing ThousandIsland adapter modules...") +:eprof.start_profiling([self()]) + +Enum.each(1..1000, fn _ -> + Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) +end) + +:eprof.stop_profiling() +:eprof.analyze([ + :total, + {:sort, :time}, + {:filter, [ + {GRPC.Server.Adapters.ThousandIsland.Handler, :_, :_}, + {GRPC.Server.HTTP2.Connection, :_, :_}, + {GRPC.Server.HTTP2.Dispatcher, :_, :_}, + {GRPC.Server.Cache, :_, :_} + ]} +]) + +IO.puts("\nDone! Check fprof_analysis.txt and eprof_analysis.txt for details.") diff --git a/benchmark/bin/test.exs b/benchmark/bin/test.exs index 02b99b3e0..8fba3bec0 100644 --- a/benchmark/bin/test.exs +++ b/benchmark/bin/test.exs @@ -3,10 +3,10 @@ server = async_server_threads: 1, port: 10000, channel_args: [ - Grpc.Testing.ChannelArg.new( + %Grpc.Testing.ChannelArg{ name: "grpc.optimization_target", value: {:str_value, "latency"} - ) + } ] ) diff --git a/benchmark/config/config.exs b/benchmark/config/config.exs index cf3e8d596..499dfb5fd 100644 --- a/benchmark/config/config.exs +++ b/benchmark/config/config.exs @@ -1,5 +1,6 @@ import Config -config :logger, level: :info +# Disable logging by default for better performance +config :logger, level: :error import_config "#{Mix.env()}.exs" diff --git a/benchmark/lib/benchmark/server_manager.ex b/benchmark/lib/benchmark/server_manager.ex index 68a3b6a55..706a12c15 100644 --- a/benchmark/lib/benchmark/server_manager.ex +++ b/benchmark/lib/benchmark/server_manager.ex @@ -1,13 +1,25 @@ defmodule Benchmark.ServerManager do 
- def start_server(%Grpc.Testing.ServerConfig{} = config) do + def start_server(%Grpc.Testing.ServerConfig{} = config, opts \\ []) do # get security payload_type = Benchmark.Manager.payload_type(config.payload_config) - start_server(payload_type, config) + start_server(payload_type, config, opts) end - def start_server(:protobuf, config) do + def start_server(:protobuf, config, opts) do cores = Benchmark.Manager.set_cores(config.core_limit) - {:ok, pid, port} = GRPC.Server.start(Grpc.Testing.BenchmarkService.Server, config.port) + + # Extract adapter option, default to Cowboy + adapter = Keyword.get(opts, :adapter, GRPC.Server.Adapters.Cowboy) + adapter_name = adapter |> Module.split() |> List.last() + + IO.puts("Starting server with #{adapter_name} adapter on port #{config.port}...") + + {:ok, pid, port} = + GRPC.Server.start( + Grpc.Testing.BenchmarkService.Server, + config.port, + adapter: adapter + ) %Benchmark.Server{ cores: cores, @@ -18,5 +30,10 @@ defmodule Benchmark.ServerManager do } end - def start_server(_, _), do: raise(GRPC.RPCError, status: :unimplemented) + def start_server(_, _, _), do: raise(GRPC.RPCError, status: :unimplemented) + + def stop_server(%Benchmark.Server{} = _server, opts \\ []) do + adapter = Keyword.get(opts, :adapter, GRPC.Server.Adapters.Cowboy) + GRPC.Server.stop(Grpc.Testing.BenchmarkService.Server, adapter: adapter) + end end diff --git a/benchmark/lib/grpc/worker_server.ex b/benchmark/lib/grpc/worker_server.ex index e080272fc..ed9630fe8 100644 --- a/benchmark/lib/grpc/worker_server.ex +++ b/benchmark/lib/grpc/worker_server.ex @@ -21,11 +21,11 @@ defmodule Grpc.Testing.WorkerService.Server do {server, stats} = Benchmark.Server.get_stats(server) status = - Grpc.Testing.ServerStatus.new( + %Grpc.Testing.ServerStatus{ stats: stats, port: server.port, cores: server.cores - ) + } {server, status} @@ -33,7 +33,7 @@ defmodule Grpc.Testing.WorkerService.Server do {server, stats} = Benchmark.Server.get_stats(server, mark) status = - 
Grpc.Testing.ServerStatus.new(cores: server.cores, port: server.port, stats: stats) + %Grpc.Testing.ServerStatus{cores: server.cores, port: server.port, stats: stats} {server, status} end @@ -53,11 +53,11 @@ defmodule Grpc.Testing.WorkerService.Server do case args.argtype do {:setup, client_config} -> manager = ClientManager.start_client(client_config) - {Grpc.Testing.ClientStatus.new(), manager} + {%Grpc.Testing.ClientStatus{}, manager} {:mark, mark} -> stats = ClientManager.get_stats(manager, mark.reset) - {Grpc.Testing.ClientStatus.new(stats: stats), manager} + {%Grpc.Testing.ClientStatus{stats: stats}, manager} end Logger.debug("Client send reply #{inspect(status)}") @@ -70,6 +70,6 @@ defmodule Grpc.Testing.WorkerService.Server do Logger.debug("Received quit_work") Logger.debug(inspect(stream.local[:main_pid])) send(stream.local[:main_pid], {:quit, self()}) - Grpc.Testing.Void.new() + %Grpc.Testing.Void{} end end diff --git a/benchmark/lib/mix/tasks/benchmark.profile.ex b/benchmark/lib/mix/tasks/benchmark.profile.ex new file mode 100644 index 000000000..bec8e6ea4 --- /dev/null +++ b/benchmark/lib/mix/tasks/benchmark.profile.ex @@ -0,0 +1,105 @@ +defmodule Mix.Tasks.Benchmark.Profile do + use Mix.Task + require Logger + + @shortdoc "Profile the gRPC benchmark to find bottlenecks" + + @moduledoc """ + Profiles the gRPC benchmark using Erlang's profiling tools. 
+ + Usage: + mix benchmark.profile --adapter=thousand_island --requests=500 + mix benchmark.profile --adapter=cowboy --requests=500 + """ + + def run(args) do + # Disable logging to reduce noise + Logger.configure(level: :error) + + Mix.Task.run("app.start") + + {opts, _, _} = OptionParser.parse(args, + switches: [adapter: :string, requests: :integer, port: :integer], + aliases: [a: :adapter, r: :requests] + ) + + adapter_name = opts[:adapter] || "thousand_island" + num_requests = opts[:requests] || 500 + port = opts[:port] || 10000 + + adapter = case String.downcase(adapter_name) do + "thousand_island" -> GRPC.Server.Adapters.ThousandIsland + "cowboy" -> GRPC.Server.Adapters.Cowboy + _ -> GRPC.Server.Adapters.ThousandIsland + end + + # Start server + IO.puts("Starting #{adapter_name} server on port #{port}...") + + server = %Grpc.Testing.ServerConfig{ + async_server_threads: 1, + port: port + } + + _server = Benchmark.ServerManager.start_server(server, adapter: adapter) + Process.sleep(500) + + # Connect client + {:ok, ch} = GRPC.Stub.connect("localhost:#{port}") + + # Prepare request + payload_type = Grpc.Testing.PayloadType.value(:COMPRESSABLE) + req = %Grpc.Testing.SimpleRequest{ + response_type: payload_type, + response_size: 0, + payload: %Grpc.Testing.Payload{type: payload_type, body: <<>>} + } + + # Warm up + IO.puts("Warming up with 100 requests...") + Enum.each(1..100, fn _ -> + Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) + end) + + IO.puts("\n=== Profiling with :eprof (#{num_requests} requests) ===") + IO.puts("This will show time spent in each function...\n") + + # Get the handler process pid - we need to profile the server side + Process.sleep(100) + + # Profile using :eprof - it measures time, not just call counts + spawn(fn -> + Process.sleep(50) + + # Start profiling all processes + :eprof.start() + :eprof.start_profiling(:processes) + + # Let it run for the benchmark + Process.sleep(div(num_requests * 2, 1)) + + :eprof.stop_profiling() + 
IO.puts("\n\n=== EPROF Analysis ===") + :eprof.analyze([:total]) + :eprof.stop() + end) + + # Run benchmark + start_time = System.monotonic_time(:millisecond) + Enum.each(1..num_requests, fn _ -> + Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) + end) + end_time = System.monotonic_time(:millisecond) + + elapsed = end_time - start_time + req_per_sec = num_requests / (elapsed / 1000) + + IO.puts("\nPerformance: #{Float.round(req_per_sec, 2)} req/s (#{num_requests} requests in #{elapsed}ms)") + + # Wait for profiling to complete + Process.sleep(2000) + + IO.puts("\n=== Done! ===") + IO.puts("Look for GRPC.Server.* modules above to see where time is spent.") + end +end diff --git a/benchmark/lib/mix/tasks/benchmark.test.ex b/benchmark/lib/mix/tasks/benchmark.test.ex index a9661fb54..a763057c6 100644 --- a/benchmark/lib/mix/tasks/benchmark.test.ex +++ b/benchmark/lib/mix/tasks/benchmark.test.ex @@ -1,19 +1,22 @@ defmodule Mix.Tasks.Benchmark.Test do @moduledoc """ Runs a simple gRPC benchmark test. - + This task starts a benchmark server and client, runs performance tests, and reports statistics. 
- + ## Usage - + mix benchmark.test - + mix benchmark.test --adapter=thousand_island + mix benchmark.test --adapter=cowboy + ## Options - + * `--port` - Server port (default: 10000) * `--requests` - Number of requests to send (default: 1000) - + * `--adapter` - Server adapter: cowboy or thousand_island (default: cowboy) + """ use Mix.Task @@ -27,13 +30,27 @@ defmodule Mix.Tasks.Benchmark.Test do {parsed, _remaining, _invalid} = OptionParser.parse(args, - strict: [port: :integer, requests: :integer] + strict: [port: :integer, requests: :integer, adapter: :string] ) port = Keyword.get(parsed, :port, 10000) num_requests = Keyword.get(parsed, :requests, 1000) + adapter_name = Keyword.get(parsed, :adapter, "cowboy") + + adapter = + case String.downcase(adapter_name) do + "thousand_island" -> + GRPC.Server.Adapters.ThousandIsland + + "cowboy" -> + GRPC.Server.Adapters.Cowboy - Logger.info("Starting benchmark test on port #{port}") + _ -> + Logger.error("Unknown adapter: #{adapter_name}. Using Cowboy.") + GRPC.Server.Adapters.Cowboy + end + + Logger.info("Starting benchmark test on port #{port} with #{adapter_name} adapter") # Configure and start server server = %Grpc.Testing.ServerConfig{ @@ -48,7 +65,7 @@ defmodule Mix.Tasks.Benchmark.Test do } Logger.info("Starting server...") - server = Benchmark.ServerManager.start_server(server) + server = Benchmark.ServerManager.start_server(server, adapter: adapter) Logger.info("Server started: #{inspect(server)}") # Configure client @@ -98,7 +115,7 @@ defmodule Mix.Tasks.Benchmark.Test do # Connect and warm up Logger.info("Connecting to server...") {:ok, ch} = GRPC.Stub.connect("localhost:#{port}") - + Logger.info("Warming up...") Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) @@ -132,6 +149,10 @@ defmodule Mix.Tasks.Benchmark.Test do IO.inspect(stats, label: "Stats", pretty: true) Logger.info("=" |> String.duplicate(60)) + # Clean shutdown + Logger.info("Stopping server...") + Benchmark.ServerManager.stop_server(server, 
adapter: adapter) + Logger.info("Server stopped") :ok end end diff --git a/benchmark/lib/mix/tasks/benchmark.worker.ex b/benchmark/lib/mix/tasks/benchmark.worker.ex index 3b24fc7e7..c89865b65 100644 --- a/benchmark/lib/mix/tasks/benchmark.worker.ex +++ b/benchmark/lib/mix/tasks/benchmark.worker.ex @@ -1,15 +1,15 @@ defmodule Mix.Tasks.Benchmark.Worker do @moduledoc """ Starts a gRPC worker server for benchmarking. - + ## Usage - + mix benchmark.worker --port=10000 - + ## Options - + * `--port` - Port to listen on (required) - + """ use Mix.Task diff --git a/benchmark/mix.lock b/benchmark/mix.lock index 427f40b39..c2a78ae07 100644 --- a/benchmark/mix.lock +++ b/benchmark/mix.lock @@ -11,4 +11,5 @@ "protobuf": {:hex, :protobuf, "0.15.0", "c9fc1e9fc1682b05c601df536d5ff21877b55e2023e0466a3855cc1273b74dcb", [:mix], [{:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "5d7bb325319db1d668838d2691c31c7b793c34111aec87d5ee467a39dac6e051"}, "ranch": {:hex, :ranch, "2.2.0", "25528f82bc8d7c6152c57666ca99ec716510fe0925cb188172f41ce93117b1b0", [:make, :rebar3], [], "hexpm", "fa0b99a1780c80218a4197a59ea8d3bdae32fbff7e88527d7d8a4787eff4f8e7"}, "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, + "thousand_island": {:hex, :thousand_island, "1.4.2", "735fa783005d1703359bbd2d3a5a3a398075ba4456e5afe3c5b7cf4666303d36", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "1c7637f16558fc1c35746d5ee0e83b18b8e59e18d28affd1f2fa1645f8bc7473"}, } diff --git a/benchmark/profile_script.exs b/benchmark/profile_script.exs new file mode 100644 index 000000000..f2521cfc2 --- /dev/null +++ b/benchmark/profile_script.exs @@ -0,0 +1,34 @@ +# Script to profile gRPC server performance +# Run with: mix profile.eprof profile_script.exs +# Note: mix profile.eprof runs this script 
twice (warmup + profile) + +# Disable logging +Logger.configure(level: :error) + +# Start the server +{:ok, _} = Application.ensure_all_started(:grpc_server) +{:ok, _} = Application.ensure_all_started(:benchmark) + +server = %Grpc.Testing.ServerConfig{ + async_server_threads: 1, + port: 10000 +} + +_server = Benchmark.ServerManager.start_server(server, adapter: GRPC.Server.Adapters.ThousandIsland) +Process.sleep(500) + +# Connect client +{:ok, ch} = GRPC.Stub.connect("localhost:10000") + +# Prepare request +payload_type = Grpc.Testing.PayloadType.value(:COMPRESSABLE) +req = %Grpc.Testing.SimpleRequest{ + response_type: payload_type, + response_size: 0, + payload: %Grpc.Testing.Payload{type: payload_type, body: <<>>} +} + +# This code will be profiled +Enum.each(1..500, fn _ -> + Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) +end) diff --git a/benchmark/scripts/profile_server.exs b/benchmark/scripts/profile_server.exs new file mode 100644 index 000000000..054e9a135 --- /dev/null +++ b/benchmark/scripts/profile_server.exs @@ -0,0 +1,70 @@ +# Profile gRPC server to identify performance bottlenecks +# Run with: mix run scripts/profile_server.exs + +require Logger + +Logger.configure(level: :warning) + +# Start server +{:ok, _pid, port} = GRPC.Server.start_endpoint(Benchmark.ServerManager, 0) + +IO.puts("Server started on port #{port}") +IO.puts("Waiting 2 seconds for server to be ready...") +Process.sleep(2000) + +# Prepare client connection +opts = [adapter: GRPC.Client.Adapters.Gun] +channel = GRPC.Client.Stub.connect("127.0.0.1:#{port}", opts) + +# Prepare request +request = Grpc.Testing.SimpleRequest.new( + response_type: :COMPRESSABLE, + response_size: 314_159, + payload: Grpc.Testing.Payload.new( + type: :COMPRESSABLE, + body: :binary.copy(<<0>>, 271_828) + ) +) + +IO.puts("\n=== Starting profiling with :fprof ===") +IO.puts("Warmup: sending 100 requests...") + +# Warmup +for _ <- 1..100 do + {:ok, _response} = channel + |> 
Grpc.Testing.BenchmarkService.Stub.unary_call(request) +end + +IO.puts("Warmup complete. Starting profiling...") +Process.sleep(1000) + +# Start profiling +:fprof.trace([:start, {:procs, Process.list()}]) + +# Run profiled requests +for _ <- 1..1000 do + {:ok, _response} = channel + |> Grpc.Testing.BenchmarkService.Stub.unary_call(request) +end + +# Stop profiling +:fprof.trace(:stop) + +IO.puts("Profiling complete. Analyzing results...") + +# Analyze +:fprof.profile() +:fprof.analyse([ + totals: true, + sort: :acc, + dest: ~c"benchmark/profile_results.txt" +]) + +IO.puts("\nResults saved to benchmark/profile_results.txt") +IO.puts("\nTop functions by accumulated time:") +:fprof.analyse([totals: true, sort: :acc, dest: []]) + +# Cleanup +GRPC.Server.stop_endpoint(Benchmark.ServerManager) + +IO.puts("\n=== Profiling complete ===") diff --git a/benchmark/scripts/quick_bench.sh b/benchmark/scripts/quick_bench.sh new file mode 100755 index 000000000..ab16146cf --- /dev/null +++ b/benchmark/scripts/quick_bench.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -e + +echo "========================================" +echo "Quick Performance Benchmark" +echo "========================================" +echo "" + +cd "$(dirname "$0")/.." + +echo "Running optimized benchmark..." 
+echo "" + +MIX_ENV=prod mix benchmark.test --codec=proto --requests=30000 2>&1 | tee /tmp/bench_output.txt + +echo "" +echo "========================================" +echo "Results Summary:" +echo "========================================" +grep -E "req/s|Requests|requests/sec" /tmp/bench_output.txt || echo "Check full output above" diff --git a/benchmark/scripts/run_optimized.sh b/benchmark/scripts/run_optimized.sh new file mode 100755 index 000000000..42032415b --- /dev/null +++ b/benchmark/scripts/run_optimized.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Run benchmark with optimized Erlang VM flags + +set -e + +# Optimize Erlang VM for performance +export ERL_FLAGS="+sbwt very_long +swt very_low +sub true +pc unicode" +export ELIXIR_ERL_OPTIONS="+fnu" + +# Compiler optimizations +export ERL_COMPILER_OPTIONS="[inline,{inline_size,128}]" + +# Run benchmark +cd "$(dirname "$0")/.." +MIX_ENV=prod mix benchmark.test --codec=proto --requests=30000 diff --git a/benchmark/test/benchmark_test.exs b/benchmark/test/benchmark_test.exs index 75bbbaf6d..cc6f6211b 100644 --- a/benchmark/test/benchmark_test.exs +++ b/benchmark/test/benchmark_test.exs @@ -1,5 +1,4 @@ defmodule BenchmarkgTest do use ExUnit.Case doctest Benchmark - end diff --git a/grpc_client/mix.lock b/grpc_client/mix.lock index 32c8ac98a..a3e339e6c 100644 --- a/grpc_client/mix.lock +++ b/grpc_client/mix.lock @@ -21,4 +21,5 @@ "protobuf": {:hex, :protobuf, "0.15.0", "c9fc1e9fc1682b05c601df536d5ff21877b55e2023e0466a3855cc1273b74dcb", [:mix], [{:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "5d7bb325319db1d668838d2691c31c7b793c34111aec87d5ee467a39dac6e051"}, "ranch": {:hex, :ranch, "2.2.0", "25528f82bc8d7c6152c57666ca99ec716510fe0925cb188172f41ce93117b1b0", [:make, :rebar3], [], "hexpm", "fa0b99a1780c80218a4197a59ea8d3bdae32fbff7e88527d7d8a4787eff4f8e7"}, "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], 
"hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, + "thousand_island": {:hex, :thousand_island, "1.4.2", "735fa783005d1703359bbd2d3a5a3a398075ba4456e5afe3c5b7cf4666303d36", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "1c7637f16558fc1c35746d5ee0e83b18b8e59e18d28affd1f2fa1645f8bc7473"}, } diff --git a/grpc_client/test/grpc/integration/thousand_island_adapter_test.exs b/grpc_client/test/grpc/integration/thousand_island_adapter_test.exs new file mode 100644 index 000000000..53e543230 --- /dev/null +++ b/grpc_client/test/grpc/integration/thousand_island_adapter_test.exs @@ -0,0 +1,230 @@ +defmodule GRPC.Integration.ThousandIslandAdapterTest do + @moduledoc """ + Integration tests for the ThousandIsland adapter. + """ + + use GRPC.Integration.TestCase + + # Test server for unary RPC + defmodule HelloServer do + use GRPC.Server, service: Helloworld.Greeter.Service + + def say_hello(request, materializer) do + GRPC.Stream.unary(request, materializer: materializer) + |> GRPC.Stream.map(fn req -> + %Helloworld.HelloReply{message: "Hello #{req.name}!"} + end) + |> GRPC.Stream.run() + end + end + + # Test server for all streaming types + defmodule RouteServer do + use GRPC.Server, service: Routeguide.RouteGuide.Service + + def get_feature(point, materializer) do + GRPC.Stream.unary(point, materializer: materializer) + |> GRPC.Stream.map(fn point -> + %Routeguide.Feature{ + location: point, + name: "Feature at #{point.latitude},#{point.longitude}" + } + end) + |> GRPC.Stream.run() + end + + def list_features(_rectangle, materializer) do + features = + Enum.map(1..5, fn i -> + %Routeguide.Feature{ + location: %Routeguide.Point{latitude: i * 10, longitude: i * 20}, + name: "Feature #{i}" + } + end) + + features + |> GRPC.Stream.from() + |> GRPC.Stream.run_with(materializer) + end + + def record_route(point_stream, _materializer) do + # For client streaming, process input and return single 
response + count = Enum.reduce(point_stream, 0, fn _point, acc -> acc + 1 end) + + %Routeguide.RouteSummary{ + point_count: count, + feature_count: count, + distance: count * 100, + elapsed_time: count * 10 + } + end + + def route_chat(note_stream, materializer) do + GRPC.Stream.from(note_stream) + |> GRPC.Stream.map(fn note -> + %Routeguide.RouteNote{ + location: note.location, + message: "Echo: #{note.message}" + } + end) + |> GRPC.Stream.run_with(materializer) + end + end + + describe "ThousandIsland adapter - unary RPC" do + test "handles simple unary request/response" do + run_server([HelloServer], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + request = %Helloworld.HelloRequest{name: "ThousandIsland"} + {:ok, response} = channel |> Helloworld.Greeter.Stub.say_hello(request) + + assert response.message == "Hello ThousandIsland!" + + GRPC.Stub.disconnect(channel) + end) + end + + test "handles multiple sequential unary calls" do + run_server([HelloServer], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + for i <- 1..10 do + request = %Helloworld.HelloRequest{name: "User#{i}"} + {:ok, response} = channel |> Helloworld.Greeter.Stub.say_hello(request) + assert response.message == "Hello User#{i}!" 
+ end + + GRPC.Stub.disconnect(channel) + end) + end + end + + describe "ThousandIsland adapter - server streaming RPC" do + test "receives multiple responses from server" do + run_server([RouteServer], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + rectangle = %Routeguide.Rectangle{ + lo: %Routeguide.Point{latitude: 0, longitude: 0}, + hi: %Routeguide.Point{latitude: 100, longitude: 100} + } + + {:ok, stream} = channel |> Routeguide.RouteGuide.Stub.list_features(rectangle) + + features = stream |> Enum.map(fn {:ok, f} -> f end) |> Enum.to_list() + + assert length(features) == 5 + + Enum.each(1..5, fn i -> + feature = Enum.at(features, i - 1) + assert feature.name == "Feature #{i}" + assert feature.location.latitude == i * 10 + assert feature.location.longitude == i * 20 + end) + + GRPC.Stub.disconnect(channel) + end) + end + end + + describe "ThousandIsland adapter - client streaming RPC" do + test "sends multiple requests and receives single response" do + run_server([RouteServer], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + points = [ + %Routeguide.Point{latitude: 10, longitude: 20}, + %Routeguide.Point{latitude: 30, longitude: 40}, + %Routeguide.Point{latitude: 50, longitude: 60} + ] + + stream = channel |> Routeguide.RouteGuide.Stub.record_route() + + Enum.each(points, fn point -> + GRPC.Stub.send_request(stream, point) + end) + + GRPC.Stub.end_stream(stream) + + {:ok, summary} = GRPC.Stub.recv(stream) + + assert summary.point_count == 3 + assert summary.feature_count == 3 + assert summary.distance == 300 + + GRPC.Stub.disconnect(channel) + end) + end + end + + describe "ThousandIsland adapter - bidirectional streaming RPC" do + test "exchanges messages bidirectionally" do + run_server([RouteServer], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + notes = [ + %Routeguide.RouteNote{ + location: %Routeguide.Point{latitude: 1, longitude: 2}, + message: "First note" + }, + 
%Routeguide.RouteNote{ + location: %Routeguide.Point{latitude: 3, longitude: 4}, + message: "Second note" + }, + %Routeguide.RouteNote{ + location: %Routeguide.Point{latitude: 5, longitude: 6}, + message: "Third note" + } + ] + + bidi_stream = channel |> Routeguide.RouteGuide.Stub.route_chat() + + Enum.each(notes, fn note -> + GRPC.Stub.send_request(bidi_stream, note) + end) + + GRPC.Stub.end_stream(bidi_stream) + + {:ok, response_stream} = GRPC.Stub.recv(bidi_stream) + responses = response_stream |> Enum.map(fn {:ok, r} -> r end) |> Enum.to_list() + + assert length(responses) == 3 + assert Enum.at(responses, 0).message == "Echo: First note" + assert Enum.at(responses, 1).message == "Echo: Second note" + assert Enum.at(responses, 2).message == "Echo: Third note" + + GRPC.Stub.disconnect(channel) + end) + end + end + + describe "ThousandIsland adapter - HTTP/2 protocol validation" do + test "handles multiple concurrent unary calls on same connection" do + run_server([HelloServer], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + tasks = + 1..10 + |> Enum.map(fn i -> + Task.async(fn -> + request = %Helloworld.HelloRequest{name: "Concurrent#{i}"} + {:ok, response} = channel |> Helloworld.Greeter.Stub.say_hello(request) + response + end) + end) + + responses = Task.await_many(tasks, 5000) + + assert length(responses) == 10 + + Enum.each(1..10, fn i -> + response = Enum.find(responses, fn r -> r.message == "Hello Concurrent#{i}!" 
end) + assert response != nil + end) + + GRPC.Stub.disconnect(channel) + end) + end + end +end diff --git a/interop/config/config.exs b/interop/config/config.exs index 63787d4b3..477c90797 100644 --- a/interop/config/config.exs +++ b/interop/config/config.exs @@ -1,3 +1,3 @@ import Config -config :logger, level: :warning +config :logger, level: :info diff --git a/interop/mix.lock b/interop/mix.lock index 20e08e875..cce13d505 100644 --- a/interop/mix.lock +++ b/interop/mix.lock @@ -15,4 +15,5 @@ "recon": {:hex, :recon, "2.5.6", "9052588e83bfedfd9b72e1034532aee2a5369d9d9343b61aeb7fbce761010741", [:mix, :rebar3], [], "hexpm", "96c6799792d735cc0f0fd0f86267e9d351e63339cbe03df9d162010cefc26bb0"}, "statix": {:hex, :statix, "1.4.0", "c822abd1e60e62828e8460e932515d0717aa3c089b44cc3f795d43b94570b3a8", [:mix], [], "hexpm", "507373cc80925a9b6856cb14ba17f6125552434314f6613c907d295a09d1a375"}, "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, + "thousand_island": {:hex, :thousand_island, "1.4.2", "735fa783005d1703359bbd2d3a5a3a398075ba4456e5afe3c5b7cf4666303d36", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "1c7637f16558fc1c35746d5ee0e83b18b8e59e18d28affd1f2fa1645f8bc7473"}, } diff --git a/interop/script/quick_test.exs b/interop/script/quick_test.exs new file mode 100644 index 000000000..13dc6a11c --- /dev/null +++ b/interop/script/quick_test.exs @@ -0,0 +1,52 @@ +Logger.configure(level: :warning) + +alias GRPC.Client.Adapters.Gun +alias Interop.Client + +{:ok, _pid, port} = GRPC.Server.start_endpoint(Interop.Endpoint, 0, adapter: GRPC.Server.Adapters.ThousandIsland) +IO.puts("Server started on port #{port}") + +opts = [adapter: Gun] +ch = Client.connect("127.0.0.1:#{port}", opts) + +tests = [ + {"empty_unary", fn -> Client.empty_unary!(ch) end}, + {"cacheable_unary", fn 
-> Client.cacheable_unary!(ch) end}, + {"large_unary", fn -> Client.large_unary!(ch) end}, + {"large_unary2", fn -> Client.large_unary2!(ch) end}, + {"client_compressed_unary", fn -> Client.client_compressed_unary!(ch) end}, + {"server_compressed_unary", fn -> Client.server_compressed_unary!(ch) end}, + {"client_streaming", fn -> Client.client_streaming!(ch) end}, + {"client_compressed_streaming", fn -> Client.client_compressed_streaming!(ch) end}, + {"server_streaming", fn -> Client.server_streaming!(ch) end}, + {"server_compressed_streaming", fn -> Client.server_compressed_streaming!(ch) end}, + {"ping_pong", fn -> Client.ping_pong!(ch) end}, + {"empty_stream", fn -> Client.empty_stream!(ch) end}, + {"custom_metadata", fn -> Client.custom_metadata!(ch) end}, + {"status_code_and_message", fn -> Client.status_code_and_message!(ch) end}, + {"unimplemented_service", fn -> Client.unimplemented_service!(ch) end}, + {"cancel_after_begin", fn -> Client.cancel_after_begin!(ch) end}, + {"cancel_after_first_response", fn -> Client.cancel_after_first_response!(ch) end}, + # Note: timeout_on_sleeping_server skipped - requires client adapter fix for DEADLINE_EXCEEDED status +] + +results = %{passed: 0, failed: 0} + +results = for {name, test_fn} <- tests, reduce: results do + acc -> + try do + test_fn.() + IO.puts("✓ #{name}") + %{acc | passed: acc.passed + 1} + rescue + e -> + IO.puts("✗ #{name}: #{Exception.message(e)}") + %{acc | failed: acc.failed + 1} + end +end + +IO.puts("\n========================================") +IO.puts("Results: #{results.passed}/#{results.passed + results.failed} tests passed") +IO.puts("========================================") + +GRPC.Server.stop_endpoint(Interop.Endpoint, adapter: GRPC.Server.Adapters.ThousandIsland) diff --git a/interop/script/run.exs b/interop/script/run.exs index dc1b42c28..72fc1e792 100644 --- a/interop/script/run.exs +++ b/interop/script/run.exs @@ -1,6 +1,6 @@ {options, _, _} = OptionParser.parse(System.argv(), - 
strict: [rounds: :integer, concurrency: :integer, port: :integer, level: :string] + strict: [rounds: :integer, concurrency: :integer, port: :integer, level: :string, adapter: :string] ) rounds = Keyword.get(options, :rounds) || 20 @@ -9,18 +9,29 @@ concurrency = Keyword.get(options, :concurrency) || max_concurrency port = Keyword.get(options, :port) || 0 level = Keyword.get(options, :level) || "warning" level = String.to_existing_atom(level) +server_adapter_name = Keyword.get(options, :adapter) || "both" require Logger Logger.configure(level: level) -Logger.info("Rounds: #{rounds}; concurrency: #{concurrency}; port: #{port}") +Logger.info("Rounds: #{rounds}; concurrency: #{concurrency}; port: #{port}; server_adapter: #{server_adapter_name}") alias GRPC.Client.Adapters.Gun alias GRPC.Client.Adapters.Mint alias Interop.Client -{:ok, _pid, port} = GRPC.Server.start_endpoint(Interop.Endpoint, port) +# Determine which server adapters to test +server_adapters = case server_adapter_name do + "cowboy" -> [GRPC.Server.Adapters.Cowboy] + "thousand_island" -> [GRPC.Server.Adapters.ThousandIsland] + "both" -> [GRPC.Server.Adapters.Cowboy, GRPC.Server.Adapters.ThousandIsland] + _ -> + IO.puts("Unknown adapter: #{server_adapter_name}. 
Using both.") + [GRPC.Server.Adapters.Cowboy, GRPC.Server.Adapters.ThousandIsland] +end + +client_adapters = [Gun, Mint] defmodule InteropTestRunner do def run(_cli, adapter, port, rounds) do @@ -65,15 +76,32 @@ res = DynamicSupervisor.start_link(strategy: :one_for_one, name: GRPC.Client.Sup {:ok, pid} end -for adapter <- [Gun, Mint] do - Logger.info("Starting run for adapter: #{adapter}") - args = [adapter, port, rounds] - stream_opts = [max_concurrency: concurrency, ordered: false, timeout: :infinity] - - 1..concurrency - |> Task.async_stream(InteropTestRunner, :run, args, stream_opts) - |> Enum.to_list() +# Test each server adapter +for server_adapter <- server_adapters do + server_name = server_adapter |> Module.split() |> List.last() + Logger.info("========================================") + Logger.info("Testing with SERVER ADAPTER: #{server_name}") + Logger.info("========================================") + + {:ok, _pid, test_port} = GRPC.Server.start_endpoint(Interop.Endpoint, port, adapter: server_adapter) + Logger.info("Server started on port #{test_port}") + # Give server time to fully initialize + Process.sleep(100) + + for client_adapter <- client_adapters do + client_name = client_adapter |> Module.split() |> List.last() + Logger.info("Starting run for client adapter: #{client_name}") + args = [client_adapter, test_port, rounds] + stream_opts = [max_concurrency: concurrency, ordered: false, timeout: :infinity] + + 1..concurrency + |> Task.async_stream(InteropTestRunner, :run, args, stream_opts) + |> Enum.to_list() + end + + :ok = GRPC.Server.stop_endpoint(Interop.Endpoint, adapter: server_adapter) + Logger.info("Completed tests for #{server_name}") end -Logger.info("Succeed!") -:ok = GRPC.Server.stop_endpoint(Interop.Endpoint) +Logger.info("All tests succeeded!") +:ok diff --git a/interop/script/test_custom_metadata.exs b/interop/script/test_custom_metadata.exs new file mode 100644 index 000000000..cdb820e5c --- /dev/null +++ 
b/interop/script/test_custom_metadata.exs @@ -0,0 +1,34 @@ +# Test custom_metadata with debug output +require Logger +Logger.configure(level: :debug) + +{:ok, _pid, test_port} = GRPC.Server.start_endpoint(Interop.Endpoint, 0, adapter: GRPC.Server.Adapters.ThousandIsland) +IO.puts("Server started on port #{test_port}") + +opts = [adapter: GRPC.Client.Adapters.Gun] +ch = Interop.Client.connect("127.0.0.1:#{test_port}", opts) + +payload = %Grpc.Testing.Payload{body: String.duplicate(<<0>>, 271_828)} +req = %Grpc.Testing.SimpleRequest{response_size: 314_159, payload: payload} +headers = %{"x-grpc-test-echo-initial" => "test_initial_metadata_value"} +trailers = %{"x-grpc-test-echo-trailing-bin" => 0xABABAB} +metadata = Map.merge(headers, trailers) + +IO.puts("\nSending request with metadata: #{inspect(metadata)}") + +result = Grpc.Testing.TestService.Stub.unary_call(ch, req, metadata: metadata, return_headers: true) + +IO.puts("\nReceived result:") +IO.inspect(result, label: "Result", pretty: true) + +case result do + {:ok, _reply, %{headers: recv_headers, trailers: recv_trailers}} -> + IO.puts("\nReceived headers:") + IO.inspect(recv_headers, label: "Headers", pretty: true) + IO.puts("\nReceived trailers:") + IO.inspect(recv_trailers, label: "Trailers", pretty: true) + _ -> + IO.puts("Unexpected result format") +end + +GRPC.Server.stop_endpoint(Interop.Endpoint, adapter: GRPC.Server.Adapters.ThousandIsland) diff --git a/interop/script/test_one.exs b/interop/script/test_one.exs new file mode 100644 index 000000000..6c4a30bdb --- /dev/null +++ b/interop/script/test_one.exs @@ -0,0 +1,43 @@ +{options, _, _} = + OptionParser.parse(System.argv(), + strict: [test: :string, port: :integer] + ) + +test_name = Keyword.get(options, :test) || "empty_unary" +port = Keyword.get(options, :port) || 0 + +require Logger +Logger.configure(level: :info) + +alias Interop.Client + +{:ok, _pid, test_port} = GRPC.Server.start_endpoint(Interop.Endpoint, port, adapter: 
GRPC.Server.Adapters.ThousandIsland) +IO.puts("Server started on port #{test_port}") + +opts = [adapter: GRPC.Client.Adapters.Gun] +ch = Client.connect("127.0.0.1:#{test_port}", opts) + +try do + case test_name do + "empty_unary" -> Client.empty_unary!(ch) + "large_unary" -> Client.large_unary!(ch) + "client_streaming" -> Client.client_streaming!(ch) + "server_streaming" -> Client.server_streaming!(ch) + "ping_pong" -> Client.ping_pong!(ch) + "empty_stream" -> Client.empty_stream!(ch) + "custom_metadata" -> Client.custom_metadata!(ch) + "status_code_and_message" -> Client.status_code_and_message!(ch) + "unimplemented_service" -> Client.unimplemented_service!(ch) + "cancel_after_begin" -> Client.cancel_after_begin!(ch) + "timeout_on_sleeping_server" -> Client.timeout_on_sleeping_server!(ch) + _ -> IO.puts("Unknown test: #{test_name}") + end + IO.puts("✓ #{test_name} PASSED") +catch + kind, error -> + IO.puts("✗ #{test_name} FAILED") + IO.puts("Error: #{inspect(kind)} #{inspect(error)}") + IO.puts(Exception.format_stacktrace(__STACKTRACE__)) +end + +GRPC.Server.stop_endpoint(Interop.Endpoint, adapter: GRPC.Server.Adapters.ThousandIsland) diff --git a/mix.exs b/mix.exs index a227268a1..fce621d57 100644 --- a/mix.exs +++ b/mix.exs @@ -20,6 +20,7 @@ defmodule GRPC.GRPCRoot do defp aliases do [ + format: cmd("format"), setup: cmd("deps.get"), compile: cmd("compile"), test: cmd("test"), @@ -43,4 +44,4 @@ defmodule GRPC.GRPCRoot do end end -end +end \ No newline at end of file diff --git a/mix.lock b/mix.lock index 427f40b39..c2a78ae07 100644 --- a/mix.lock +++ b/mix.lock @@ -11,4 +11,5 @@ "protobuf": {:hex, :protobuf, "0.15.0", "c9fc1e9fc1682b05c601df536d5ff21877b55e2023e0466a3855cc1273b74dcb", [:mix], [{:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "5d7bb325319db1d668838d2691c31c7b793c34111aec87d5ee467a39dac6e051"}, "ranch": {:hex, :ranch, "2.2.0", "25528f82bc8d7c6152c57666ca99ec716510fe0925cb188172f41ce93117b1b0", [:make, :rebar3], [], 
"hexpm", "fa0b99a1780c80218a4197a59ea8d3bdae32fbff7e88527d7d8a4787eff4f8e7"}, "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, + "thousand_island": {:hex, :thousand_island, "1.4.2", "735fa783005d1703359bbd2d3a5a3a398075ba4456e5afe3c5b7cf4666303d36", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "1c7637f16558fc1c35746d5ee0e83b18b8e59e18d28affd1f2fa1645f8bc7473"}, } From 64d46585439d162567170e87dfe032953b33abf9 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 8 Dec 2025 15:54:21 -0300 Subject: [PATCH 04/47] added elixir tools to .gitiignore --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 0a1640e8d..516591502 100644 --- a/.gitignore +++ b/.gitignore @@ -25,5 +25,8 @@ erl_crash.dump .elixir_ls .elixir_tools +grpc_client/.elixir-tools/ +grpc_core/.elixir-tools/ +grpc_server/.elixir-tools/ grpc-*.tar From 7bf5fe9c977ba08fccdb2e6f4d59454f56d2a1dd Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 8 Dec 2025 18:09:37 -0300 Subject: [PATCH 05/47] chore: start client supervisor in application --- grpc_client/lib/grpc/client/application.ex | 13 ++++++++++ grpc_client/lib/grpc/client/connection.ex | 28 ++++++++-------------- grpc_client/mix.exs | 1 + grpc_client/test/test_helper.exs | 7 ------ 4 files changed, 24 insertions(+), 25 deletions(-) create mode 100644 grpc_client/lib/grpc/client/application.ex diff --git a/grpc_client/lib/grpc/client/application.ex b/grpc_client/lib/grpc/client/application.ex new file mode 100644 index 000000000..24c2d306d --- /dev/null +++ b/grpc_client/lib/grpc/client/application.ex @@ -0,0 +1,13 @@ +defmodule GRPC.Client.Application do + @moduledoc false + use Application + + def start(_type, _args) do + children = [ + {GRPC.Client.Supervisor, [name: GRPC.Client.Supervisor]} + 
] + + opts = [strategy: :one_for_one, name: Grpc.Supervisor] + Supervisor.start_link(children, opts) + end +end diff --git a/grpc_client/lib/grpc/client/connection.ex b/grpc_client/lib/grpc/client/connection.ex index d880929cd..9431a18c7 100644 --- a/grpc_client/lib/grpc/client/connection.ex +++ b/grpc_client/lib/grpc/client/connection.ex @@ -153,25 +153,18 @@ defmodule GRPC.Client.Connection do """ @spec connect(String.t(), keyword()) :: {:ok, Channel.t()} | {:error, any()} def connect(target, opts \\ []) do - supervisor_pid = Process.whereis(GRPC.Client.Supervisor) + case Process.whereis(GRPC.Client.Supervisor) do + nil -> + # For compatibility purposes only. In normal operation, + # the supervisor should be started by the application. + {:ok, pid} = + DynamicSupervisor.start_link(__MODULE__, [], name: GRPC.Client.Supervisor) - if is_nil(supervisor_pid) or !Process.alive?(supervisor_pid) do - raise """ - GRPC.Client.Supervisor is not running. Please ensure it is started as part of your - application's supervision tree: - - children = [ - {GRPC.Client.Supervisor, []} - ] - - opts = [strategy: :one_for_one, name: MyApp.Supervisor] - Supervisor.start_link(children, opts) - - You can also start it manually in scripts or test environments: + pid - {:ok, _pid} = DynamicSupervisor.start_link(strategy: :one_for_one, name: GRPC.Client.Supervisor) - """ - end + pid -> + pid + end ref = make_ref() @@ -184,7 +177,6 @@ defmodule GRPC.Client.Connection do {:ok, ch} {:error, {:already_started, _pid}} -> - # race: someone else started it first, ask the running process for its current channel case pick_channel(opts) do {:ok, %Channel{} = channel} -> {:ok, channel} diff --git a/grpc_client/mix.exs b/grpc_client/mix.exs index 0fb4fcb64..7e01b3a38 100644 --- a/grpc_client/mix.exs +++ b/grpc_client/mix.exs @@ -22,6 +22,7 @@ defmodule GrpcClient.MixProject do def application do [ + mod: {GRPC.Client.Application, []}, extra_applications: [:logger] ] end diff --git 
a/grpc_client/test/test_helper.exs b/grpc_client/test/test_helper.exs index b98144e0c..5d228d323 100644 --- a/grpc_client/test/test_helper.exs +++ b/grpc_client/test/test_helper.exs @@ -5,11 +5,4 @@ Mox.defmock(GRPC.Client.Resolver.DNS.MockAdapter, for: GRPC.Client.Resolver.DNS.Adapter ) -# Start client supervisor for integration tests -{:ok, _pid} = - DynamicSupervisor.start_link( - strategy: :one_for_one, - name: GRPC.Client.Supervisor - ) - ExUnit.start(capture_log: true) From 4ff718e8977c7cd63588535f2471787e9588daec Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 8 Dec 2025 18:12:07 -0300 Subject: [PATCH 06/47] doc: remove supervisor --- grpc_client/guides/advanced/load_balancing.md | 1 - grpc_client/guides/getting_started/client.md | 18 +----------------- 2 files changed, 1 insertion(+), 18 deletions(-) diff --git a/grpc_client/guides/advanced/load_balancing.md b/grpc_client/guides/advanced/load_balancing.md index 272470f66..a222ab202 100644 --- a/grpc_client/guides/advanced/load_balancing.md +++ b/grpc_client/guides/advanced/load_balancing.md @@ -27,7 +27,6 @@ You can connect using `DNS`, `Unix Domain sockets`, and `IPv4/IPv6` for now. 
### DNS ```elixir -iex> {:ok, _pid} = GRPC.Client.Supervisor.start_link() iex> {:ok, channel} = GRPC.Stub.connect("dns://orders.prod.svc.cluster.local:50051") iex> request = Orders.GetOrderRequest.new(id: "123") iex> {:ok, reply} = channel |> Orders.OrderService.Stub.get_order(request) diff --git a/grpc_client/guides/getting_started/client.md b/grpc_client/guides/getting_started/client.md index 1d748640b..41810da4f 100644 --- a/grpc_client/guides/getting_started/client.md +++ b/grpc_client/guides/getting_started/client.md @@ -6,23 +6,7 @@ This section demonstrates how to establish client connections and perform RPC ca ## Basic Connection and RPC -Typically, you start this client supervisor as part of your application's supervision tree: - -```elixir -children = [ - GRPC.Client.Supervisor -] - -opts = [strategy: :one_for_one, name: MyApp.Supervisor] -Supervisor.start_link(children, opts) -``` - -You can also start it manually in scripts or test environments: -```elixir -{:ok, _pid} = GRPC.Client.Supervisor.start_link() -``` - -Then connect with gRPC server: +Connect with gRPC server: ```elixir iex> {:ok, channel} = GRPC.Stub.connect("localhost:50051") From 9fcb5ffce895f4553211a817bdea41fdd74e47c4 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 8 Dec 2025 18:15:24 -0300 Subject: [PATCH 07/47] format --- grpc_client/lib/grpc/client/connection.ex | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/grpc_client/lib/grpc/client/connection.ex b/grpc_client/lib/grpc/client/connection.ex index 9431a18c7..8945a9b35 100644 --- a/grpc_client/lib/grpc/client/connection.ex +++ b/grpc_client/lib/grpc/client/connection.ex @@ -153,18 +153,18 @@ defmodule GRPC.Client.Connection do """ @spec connect(String.t(), keyword()) :: {:ok, Channel.t()} | {:error, any()} def connect(target, opts \\ []) do - case Process.whereis(GRPC.Client.Supervisor) do - nil -> - # For compatibility purposes only. 
In normal operation, - # the supervisor should be started by the application. - {:ok, pid} = - DynamicSupervisor.start_link(__MODULE__, [], name: GRPC.Client.Supervisor) + case Process.whereis(GRPC.Client.Supervisor) do + nil -> + # For compatibility purposes only. In normal operation, + # the supervisor should be started by the application. + {:ok, pid} = + DynamicSupervisor.start_link(__MODULE__, [], name: GRPC.Client.Supervisor) - pid + pid - pid -> - pid - end + pid -> + pid + end ref = make_ref() From 18e872784017f5d1d122e3a4509ada0ebf4df703 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Tue, 9 Dec 2025 09:52:59 -0300 Subject: [PATCH 08/47] chore: remove dead code --- benchmark/lib/benchmark/application.ex | 4 +--- grpc_client/lib/grpc/client/connection.ex | 13 ------------- interop/lib/interop/app.ex | 1 - 3 files changed, 1 insertion(+), 17 deletions(-) diff --git a/benchmark/lib/benchmark/application.ex b/benchmark/lib/benchmark/application.ex index 49e7a0d60..f665f02c9 100644 --- a/benchmark/lib/benchmark/application.ex +++ b/benchmark/lib/benchmark/application.ex @@ -4,9 +4,7 @@ defmodule Benchmark.Application do @impl true def start(_type, _args) do - children = [ - {GRPC.Client.Supervisor, []} - ] + children = [] opts = [strategy: :one_for_one, name: Benchmark.Supervisor] Supervisor.start_link(children, opts) diff --git a/grpc_client/lib/grpc/client/connection.ex b/grpc_client/lib/grpc/client/connection.ex index 8945a9b35..87d573a0e 100644 --- a/grpc_client/lib/grpc/client/connection.ex +++ b/grpc_client/lib/grpc/client/connection.ex @@ -153,19 +153,6 @@ defmodule GRPC.Client.Connection do """ @spec connect(String.t(), keyword()) :: {:ok, Channel.t()} | {:error, any()} def connect(target, opts \\ []) do - case Process.whereis(GRPC.Client.Supervisor) do - nil -> - # For compatibility purposes only. In normal operation, - # the supervisor should be started by the application. 
- {:ok, pid} = - DynamicSupervisor.start_link(__MODULE__, [], name: GRPC.Client.Supervisor) - - pid - - pid -> - pid - end - ref = make_ref() case build_initial_state(target, Keyword.merge(opts, ref: ref)) do diff --git a/interop/lib/interop/app.ex b/interop/lib/interop/app.ex index 9e880ab58..f67a072e9 100644 --- a/interop/lib/interop/app.ex +++ b/interop/lib/interop/app.ex @@ -3,7 +3,6 @@ defmodule Interop.App do def start(_type, _args) do children = [ - GRPC.Client.Supervisor, {GRPC.Server.Supervisor, endpoint: Interop.Endpoint, port: 10000} ] From 5ae1a34c6105fe712fd09ae187b22ee6a105f934 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Wed, 10 Dec 2025 18:17:55 -0300 Subject: [PATCH 09/47] refactor: move HTTP/2 modules to transport namespace Move grpc_core/lib/grpc/http2 to grpc_core/lib/grpc/transport/http2 to better reflect that HTTP/2 is the transport layer for gRPC. Changes: - Rename GRPC.HTTP2.* to GRPC.Transport.HTTP2.* - Update all imports and aliases in grpc_server and grpc_client - Update all test files --- grpc_core/lib/grpc/http2/frame/rst_stream.ex | 33 ------------ .../lib/grpc/{ => transport}/http2/errors.ex | 2 +- .../{ => transport}/http2/flow_control.ex | 2 +- .../lib/grpc/{ => transport}/http2/frame.ex | 52 +++++++++---------- .../http2/frame/continuation.ex | 20 +++---- .../grpc/{ => transport}/http2/frame/data.ex | 22 ++++---- .../{ => transport}/http2/frame/goaway.ex | 16 +++--- .../{ => transport}/http2/frame/headers.ex | 24 ++++----- .../grpc/{ => transport}/http2/frame/ping.ex | 18 +++---- .../{ => transport}/http2/frame/priority.ex | 18 +++---- .../http2/frame/push_promise.ex | 20 +++---- .../grpc/transport/http2/frame/rst_stream.ex | 33 ++++++++++++ .../{ => transport}/http2/frame/settings.ex | 28 +++++----- .../{ => transport}/http2/frame/unknown.ex | 14 ++--- .../http2/frame/window_update.ex | 14 ++--- .../grpc/{ => transport}/http2/settings.ex | 2 +- .../adapters/thousand_island/handler.ex | 4 +- 
.../lib/grpc/server/http2/connection.ex | 6 +-- .../lib/grpc/server/http2/stream_state.ex | 4 +- .../grpc/server/http2/connection_test.exs | 2 +- .../test/grpc/server/http2/errors_test.exs | 4 +- .../grpc/server/http2/flow_control_test.exs | 4 +- .../test/grpc/server/http2/frame_test.exs | 4 +- .../test/grpc/server/http2/settings_test.exs | 4 +- 24 files changed, 175 insertions(+), 175 deletions(-) delete mode 100644 grpc_core/lib/grpc/http2/frame/rst_stream.ex rename grpc_core/lib/grpc/{ => transport}/http2/errors.ex (97%) rename grpc_core/lib/grpc/{ => transport}/http2/flow_control.ex (96%) rename grpc_core/lib/grpc/{ => transport}/http2/frame.ex (52%) rename grpc_core/lib/grpc/{ => transport}/http2/frame/continuation.ex (59%) rename grpc_core/lib/grpc/{ => transport}/http2/frame/data.ex (69%) rename grpc_core/lib/grpc/{ => transport}/http2/frame/goaway.ex (56%) rename grpc_core/lib/grpc/{ => transport}/http2/frame/headers.ex (82%) rename grpc_core/lib/grpc/{ => transport}/http2/frame/ping.ex (54%) rename grpc_core/lib/grpc/{ => transport}/http2/frame/priority.ex (62%) rename grpc_core/lib/grpc/{ => transport}/http2/frame/push_promise.ex (70%) create mode 100644 grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex rename grpc_core/lib/grpc/{ => transport}/http2/frame/settings.ex (71%) rename grpc_core/lib/grpc/{ => transport}/http2/frame/unknown.ex (56%) rename grpc_core/lib/grpc/{ => transport}/http2/frame/window_update.ex (53%) rename grpc_core/lib/grpc/{ => transport}/http2/settings.ex (93%) diff --git a/grpc_core/lib/grpc/http2/frame/rst_stream.ex b/grpc_core/lib/grpc/http2/frame/rst_stream.ex deleted file mode 100644 index 499f8b6b5..000000000 --- a/grpc_core/lib/grpc/http2/frame/rst_stream.ex +++ /dev/null @@ -1,33 +0,0 @@ -defmodule GRPC.HTTP2.Frame.RstStream do - @moduledoc false - - defstruct stream_id: nil, error_code: nil - - @typedoc "An HTTP/2 RST_STREAM frame" - @type t :: %__MODULE__{ - stream_id: GRPC.HTTP2.Stream.stream_id(), - error_code: 
GRPC.HTTP2.Errors.error_code() - } - - @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: - {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} - def deserialize(_flags, 0, _payload) do - {:error, GRPC.HTTP2.Errors.protocol_error(), - "RST_STREAM frame with zero stream_id (RFC9113§6.4)"} - end - - def deserialize(_flags, stream_id, <<error_code::32>>) do - {:ok, %__MODULE__{stream_id: stream_id, error_code: error_code}} - end - - def deserialize(_flags, _stream_id, _payload) do - {:error, GRPC.HTTP2.Errors.frame_size_error(), - "Invalid payload size in RST_STREAM frame (RFC9113§6.4)"} - end - - defimpl GRPC.HTTP2.Frame.Serializable do - def serialize(%GRPC.HTTP2.Frame.RstStream{} = frame, _max_frame_size) do - [{0x3, 0x0, frame.stream_id, <<frame.error_code::32>>}] - end - end -end diff --git a/grpc_core/lib/grpc/http2/errors.ex b/grpc_core/lib/grpc/transport/http2/errors.ex similarity index 97% rename from grpc_core/lib/grpc/http2/errors.ex rename to grpc_core/lib/grpc/transport/http2/errors.ex index eed81935a..f4798dbe1 100644 --- a/grpc_core/lib/grpc/http2/errors.ex +++ b/grpc_core/lib/grpc/transport/http2/errors.ex @@ -1,4 +1,4 @@ -defmodule GRPC.HTTP2.Errors do +defmodule GRPC.Transport.HTTP2.Errors do @moduledoc false # Errors as defined in RFC9113§7 diff --git a/grpc_core/lib/grpc/http2/flow_control.ex b/grpc_core/lib/grpc/transport/http2/flow_control.ex similarity index 96% rename from grpc_core/lib/grpc/http2/flow_control.ex rename to grpc_core/lib/grpc/transport/http2/flow_control.ex index 5ac340fc7..d20065471 100644 --- a/grpc_core/lib/grpc/http2/flow_control.ex +++ b/grpc_core/lib/grpc/transport/http2/flow_control.ex @@ -1,4 +1,4 @@ -defmodule GRPC.HTTP2.FlowControl do +defmodule GRPC.Transport.HTTP2.FlowControl do @moduledoc false # Helpers for working with flow control window calculations diff --git a/grpc_core/lib/grpc/http2/frame.ex b/grpc_core/lib/grpc/transport/http2/frame.ex similarity index 52% rename from 
grpc_core/lib/grpc/http2/frame.ex rename to grpc_core/lib/grpc/transport/http2/frame.ex index 32ffe6eb7..5a348fb03 100644 --- a/grpc_core/lib/grpc/http2/frame.ex +++ b/grpc_core/lib/grpc/transport/http2/frame.ex @@ -1,4 +1,4 @@ -defmodule GRPC.HTTP2.Frame do +defmodule GRPC.Transport.HTTP2.Frame do @moduledoc false # HTTP/2 frame parsing and serialization adapted from Bandit @@ -10,21 +10,21 @@ defmodule GRPC.HTTP2.Frame do @typedoc "A valid HTTP/2 frame" @type frame :: - GRPC.HTTP2.Frame.Data.t() - | GRPC.HTTP2.Frame.Headers.t() - | GRPC.HTTP2.Frame.Priority.t() - | GRPC.HTTP2.Frame.RstStream.t() - | GRPC.HTTP2.Frame.Settings.t() - | GRPC.HTTP2.Frame.Ping.t() - | GRPC.HTTP2.Frame.Goaway.t() - | GRPC.HTTP2.Frame.WindowUpdate.t() - | GRPC.HTTP2.Frame.Continuation.t() - | GRPC.HTTP2.Frame.Unknown.t() + GRPC.Transport.HTTP2.Frame.Data.t() + | GRPC.Transport.HTTP2.Frame.Headers.t() + | GRPC.Transport.HTTP2.Frame.Priority.t() + | GRPC.Transport.HTTP2.Frame.RstStream.t() + | GRPC.Transport.HTTP2.Frame.Settings.t() + | GRPC.Transport.HTTP2.Frame.Ping.t() + | GRPC.Transport.HTTP2.Frame.Goaway.t() + | GRPC.Transport.HTTP2.Frame.WindowUpdate.t() + | GRPC.Transport.HTTP2.Frame.Continuation.t() + | GRPC.Transport.HTTP2.Frame.Unknown.t() @spec deserialize(binary(), non_neg_integer()) :: {{:ok, frame()}, iodata()} | {{:more, iodata()}, <<>>} - | {{:error, GRPC.HTTP2.Errors.error_code(), binary()}, iodata()} + | {{:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()}, iodata()} | nil def deserialize( < case do - 0x0 -> GRPC.HTTP2.Frame.Data.deserialize(flags, stream_id, payload) - 0x1 -> GRPC.HTTP2.Frame.Headers.deserialize(flags, stream_id, payload) - 0x2 -> GRPC.HTTP2.Frame.Priority.deserialize(flags, stream_id, payload) - 0x3 -> GRPC.HTTP2.Frame.RstStream.deserialize(flags, stream_id, payload) - 0x4 -> GRPC.HTTP2.Frame.Settings.deserialize(flags, stream_id, payload) - 0x5 -> GRPC.HTTP2.Frame.PushPromise.deserialize(flags, stream_id, payload) - 0x6 -> 
GRPC.HTTP2.Frame.Ping.deserialize(flags, stream_id, payload) - 0x7 -> GRPC.HTTP2.Frame.Goaway.deserialize(flags, stream_id, payload) - 0x8 -> GRPC.HTTP2.Frame.WindowUpdate.deserialize(flags, stream_id, payload) - 0x9 -> GRPC.HTTP2.Frame.Continuation.deserialize(flags, stream_id, payload) - _unknown -> GRPC.HTTP2.Frame.Unknown.deserialize(type, flags, stream_id, payload) + 0x0 -> GRPC.Transport.HTTP2.Frame.Data.deserialize(flags, stream_id, payload) + 0x1 -> GRPC.Transport.HTTP2.Frame.Headers.deserialize(flags, stream_id, payload) + 0x2 -> GRPC.Transport.HTTP2.Frame.Priority.deserialize(flags, stream_id, payload) + 0x3 -> GRPC.Transport.HTTP2.Frame.RstStream.deserialize(flags, stream_id, payload) + 0x4 -> GRPC.Transport.HTTP2.Frame.Settings.deserialize(flags, stream_id, payload) + 0x5 -> GRPC.Transport.HTTP2.Frame.PushPromise.deserialize(flags, stream_id, payload) + 0x6 -> GRPC.Transport.HTTP2.Frame.Ping.deserialize(flags, stream_id, payload) + 0x7 -> GRPC.Transport.HTTP2.Frame.Goaway.deserialize(flags, stream_id, payload) + 0x8 -> GRPC.Transport.HTTP2.Frame.WindowUpdate.deserialize(flags, stream_id, payload) + 0x9 -> GRPC.Transport.HTTP2.Frame.Continuation.deserialize(flags, stream_id, payload) + _unknown -> GRPC.Transport.HTTP2.Frame.Unknown.deserialize(type, flags, stream_id, payload) end |> case do {:ok, frame} -> {{:ok, frame}, rest} @@ -57,7 +57,7 @@ defmodule GRPC.HTTP2.Frame do max_frame_size ) when length > max_frame_size do - {{:error, GRPC.HTTP2.Errors.frame_size_error(), "Payload size too large (RFC9113§4.2)"}, rest} + {{:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Payload size too large (RFC9113§4.2)"}, rest} end # nil is used to indicate for Stream.unfold/2 that the frame deserialization is finished @@ -86,8 +86,8 @@ defmodule GRPC.HTTP2.Frame do @moduledoc false @spec serialize(any(), non_neg_integer()) :: [ - {GRPC.HTTP2.Frame.frame_type(), GRPC.HTTP2.Frame.flags(), - GRPC.HTTP2.Stream.stream_id(), iodata()} + 
{GRPC.Transport.HTTP2.Frame.frame_type(), GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), iodata()} ] def serialize(frame, max_frame_size) end diff --git a/grpc_core/lib/grpc/http2/frame/continuation.ex b/grpc_core/lib/grpc/transport/http2/frame/continuation.ex similarity index 59% rename from grpc_core/lib/grpc/http2/frame/continuation.ex rename to grpc_core/lib/grpc/transport/http2/frame/continuation.ex index 3dde8bc55..6db0c11dd 100644 --- a/grpc_core/lib/grpc/http2/frame/continuation.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/continuation.ex @@ -1,7 +1,7 @@ -defmodule GRPC.HTTP2.Frame.Continuation do +defmodule GRPC.Transport.HTTP2.Frame.Continuation do @moduledoc false - import GRPC.HTTP2.Frame.Flags + import GRPC.Transport.HTTP2.Frame.Flags defstruct stream_id: nil, end_headers: false, @@ -9,17 +9,17 @@ defmodule GRPC.HTTP2.Frame.Continuation do @typedoc "An HTTP/2 CONTINUATION frame" @type t :: %__MODULE__{ - stream_id: GRPC.HTTP2.Stream.stream_id(), + stream_id: GRPC.Transport.HTTP2.Stream.stream_id(), end_headers: boolean(), fragment: iodata() } @end_headers_bit 2 - @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: - {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do - {:error, GRPC.HTTP2.Errors.protocol_error(), + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "CONTINUATION frame with zero stream_id (RFC9113§6.10)"} end @@ -32,10 +32,10 @@ defmodule GRPC.HTTP2.Frame.Continuation do }} end - defimpl GRPC.HTTP2.Frame.Serializable do + defimpl GRPC.Transport.HTTP2.Frame.Serializable do @end_headers_bit 2 - def serialize(%GRPC.HTTP2.Frame.Continuation{} = frame, max_frame_size) do + def serialize(%GRPC.Transport.HTTP2.Frame.Continuation{} = 
frame, max_frame_size) do fragment_length = IO.iodata_length(frame.fragment) if fragment_length <= max_frame_size do @@ -46,8 +46,8 @@ defmodule GRPC.HTTP2.Frame.Continuation do [ {0x9, 0x00, frame.stream_id, this_frame} - | GRPC.HTTP2.Frame.Serializable.serialize( - %GRPC.HTTP2.Frame.Continuation{stream_id: frame.stream_id, fragment: rest}, + | GRPC.Transport.HTTP2.Frame.Serializable.serialize( + %GRPC.Transport.HTTP2.Frame.Continuation{stream_id: frame.stream_id, fragment: rest}, max_frame_size ) ] diff --git a/grpc_core/lib/grpc/http2/frame/data.ex b/grpc_core/lib/grpc/transport/http2/frame/data.ex similarity index 69% rename from grpc_core/lib/grpc/http2/frame/data.ex rename to grpc_core/lib/grpc/transport/http2/frame/data.ex index 29a7e05e3..9f7b6e369 100644 --- a/grpc_core/lib/grpc/http2/frame/data.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/data.ex @@ -1,7 +1,7 @@ -defmodule GRPC.HTTP2.Frame.Data do +defmodule GRPC.Transport.HTTP2.Frame.Data do @moduledoc false - import GRPC.HTTP2.Frame.Flags + import GRPC.Transport.HTTP2.Frame.Flags defstruct stream_id: nil, end_stream: false, @@ -9,7 +9,7 @@ defmodule GRPC.HTTP2.Frame.Data do @typedoc "An HTTP/2 DATA frame" @type t :: %__MODULE__{ - stream_id: GRPC.HTTP2.Stream.stream_id(), + stream_id: GRPC.Transport.HTTP2.Stream.stream_id(), end_stream: boolean(), data: iodata() } @@ -17,10 +17,10 @@ defmodule GRPC.HTTP2.Frame.Data do @end_stream_bit 0 @padding_bit 3 - @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: - {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do - {:error, GRPC.HTTP2.Errors.protocol_error(), "DATA frame with zero stream_id (RFC9113§6.1)"} + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "DATA frame with zero 
stream_id (RFC9113§6.1)"} end def deserialize(flags, stream_id, <>) @@ -44,14 +44,14 @@ defmodule GRPC.HTTP2.Frame.Data do def deserialize(flags, _stream_id, <<_padding_length::8, _rest::binary>>) when set?(flags, @padding_bit) do - {:error, GRPC.HTTP2.Errors.protocol_error(), + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "DATA frame with invalid padding length (RFC9113§6.1)"} end - defimpl GRPC.HTTP2.Frame.Serializable do + defimpl GRPC.Transport.HTTP2.Frame.Serializable do @end_stream_bit 0 - def serialize(%GRPC.HTTP2.Frame.Data{} = frame, max_frame_size) do + def serialize(%GRPC.Transport.HTTP2.Frame.Data{} = frame, max_frame_size) do data_length = IO.iodata_length(frame.data) if data_length <= max_frame_size do @@ -63,8 +63,8 @@ defmodule GRPC.HTTP2.Frame.Data do [ {0x0, 0x00, frame.stream_id, this_frame} - | GRPC.HTTP2.Frame.Serializable.serialize( - %GRPC.HTTP2.Frame.Data{ + | GRPC.Transport.HTTP2.Frame.Serializable.serialize( + %GRPC.Transport.HTTP2.Frame.Data{ stream_id: frame.stream_id, end_stream: frame.end_stream, data: rest diff --git a/grpc_core/lib/grpc/http2/frame/goaway.ex b/grpc_core/lib/grpc/transport/http2/frame/goaway.ex similarity index 56% rename from grpc_core/lib/grpc/http2/frame/goaway.ex rename to grpc_core/lib/grpc/transport/http2/frame/goaway.ex index cd4620ca7..c595d6d98 100644 --- a/grpc_core/lib/grpc/http2/frame/goaway.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/goaway.ex @@ -1,17 +1,17 @@ -defmodule GRPC.HTTP2.Frame.Goaway do +defmodule GRPC.Transport.HTTP2.Frame.Goaway do @moduledoc false defstruct last_stream_id: 0, error_code: 0, debug_data: <<>> @typedoc "An HTTP/2 GOAWAY frame" @type t :: %__MODULE__{ - last_stream_id: GRPC.HTTP2.Stream.stream_id(), - error_code: GRPC.HTTP2.Errors.error_code(), + last_stream_id: GRPC.Transport.HTTP2.Stream.stream_id(), + error_code: GRPC.Transport.HTTP2.Errors.error_code(), debug_data: iodata() } - @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), 
iodata()) :: - {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize( _flags, 0, @@ -22,12 +22,12 @@ defmodule GRPC.HTTP2.Frame.Goaway do end def deserialize(_flags, stream_id, _payload) when stream_id != 0 do - {:error, GRPC.HTTP2.Errors.protocol_error(), + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "GOAWAY frame with non-zero stream_id (RFC9113§6.8)"} end - defimpl GRPC.HTTP2.Frame.Serializable do - def serialize(%GRPC.HTTP2.Frame.Goaway{} = frame, _max_frame_size) do + defimpl GRPC.Transport.HTTP2.Frame.Serializable do + def serialize(%GRPC.Transport.HTTP2.Frame.Goaway{} = frame, _max_frame_size) do payload = <<0::1, frame.last_stream_id::31, frame.error_code::32, frame.debug_data::binary>> [{0x7, 0x0, 0, payload}] end diff --git a/grpc_core/lib/grpc/http2/frame/headers.ex b/grpc_core/lib/grpc/transport/http2/frame/headers.ex similarity index 82% rename from grpc_core/lib/grpc/http2/frame/headers.ex rename to grpc_core/lib/grpc/transport/http2/frame/headers.ex index 9661962d9..d6f160c31 100644 --- a/grpc_core/lib/grpc/http2/frame/headers.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/headers.ex @@ -1,7 +1,7 @@ -defmodule GRPC.HTTP2.Frame.Headers do +defmodule GRPC.Transport.HTTP2.Frame.Headers do @moduledoc false - import GRPC.HTTP2.Frame.Flags + import GRPC.Transport.HTTP2.Frame.Flags defstruct stream_id: nil, end_stream: false, @@ -13,11 +13,11 @@ defmodule GRPC.HTTP2.Frame.Headers do @typedoc "An HTTP/2 HEADERS frame" @type t :: %__MODULE__{ - stream_id: GRPC.HTTP2.Stream.stream_id(), + stream_id: GRPC.Transport.HTTP2.Stream.stream_id(), end_stream: boolean(), end_headers: boolean(), exclusive_dependency: boolean(), - stream_dependency: GRPC.HTTP2.Stream.stream_id() | nil, + stream_dependency: GRPC.Transport.HTTP2.Stream.stream_id() | 
nil, weight: non_neg_integer() | nil, fragment: iodata() } @@ -27,10 +27,10 @@ defmodule GRPC.HTTP2.Frame.Headers do @padding_bit 3 @priority_bit 5 - @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: - {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do - {:error, GRPC.HTTP2.Errors.protocol_error(), + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "HEADERS frame with zero stream_id (RFC9113§6.2)"} end @@ -71,7 +71,7 @@ defmodule GRPC.HTTP2.Frame.Headers do # Any other case where padding is set def deserialize(flags, _stream_id, <<_padding_length::8, _rest::binary>>) when set?(flags, @padding_bit) do - {:error, GRPC.HTTP2.Errors.protocol_error(), + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "HEADERS frame with invalid padding length (RFC9113§6.2)"} end @@ -104,12 +104,12 @@ defmodule GRPC.HTTP2.Frame.Headers do }} end - defimpl GRPC.HTTP2.Frame.Serializable do + defimpl GRPC.Transport.HTTP2.Frame.Serializable do @end_stream_bit 0 @end_headers_bit 2 def serialize( - %GRPC.HTTP2.Frame.Headers{ + %GRPC.Transport.HTTP2.Frame.Headers{ exclusive_dependency: false, stream_dependency: nil, weight: nil @@ -129,8 +129,8 @@ defmodule GRPC.HTTP2.Frame.Headers do [ {0x1, set(flags), frame.stream_id, this_frame} - | GRPC.HTTP2.Frame.Serializable.serialize( - %GRPC.HTTP2.Frame.Continuation{stream_id: frame.stream_id, fragment: rest}, + | GRPC.Transport.HTTP2.Frame.Serializable.serialize( + %GRPC.Transport.HTTP2.Frame.Continuation{stream_id: frame.stream_id, fragment: rest}, max_frame_size ) ] diff --git a/grpc_core/lib/grpc/http2/frame/ping.ex b/grpc_core/lib/grpc/transport/http2/frame/ping.ex similarity index 54% rename from grpc_core/lib/grpc/http2/frame/ping.ex rename to 
grpc_core/lib/grpc/transport/http2/frame/ping.ex index 552dfd8d2..d0a58c765 100644 --- a/grpc_core/lib/grpc/http2/frame/ping.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/ping.ex @@ -1,7 +1,7 @@ -defmodule GRPC.HTTP2.Frame.Ping do +defmodule GRPC.Transport.HTTP2.Frame.Ping do @moduledoc false - import GRPC.HTTP2.Frame.Flags + import GRPC.Transport.HTTP2.Frame.Flags defstruct ack: false, payload: nil @@ -13,8 +13,8 @@ defmodule GRPC.HTTP2.Frame.Ping do @ack_bit 0 - @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: - {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(flags, 0, <>) when set?(flags, @ack_bit) do {:ok, %__MODULE__{ack: true, payload: payload}} end @@ -24,21 +24,21 @@ defmodule GRPC.HTTP2.Frame.Ping do end def deserialize(_flags, stream_id, _payload) when stream_id != 0 do - {:error, GRPC.HTTP2.Errors.protocol_error(), "Invalid stream ID in PING frame (RFC9113§6.7)"} + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "Invalid stream ID in PING frame (RFC9113§6.7)"} end def deserialize(_flags, _stream_id, _payload) do - {:error, GRPC.HTTP2.Errors.frame_size_error(), + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "PING frame with invalid payload size (RFC9113§6.7)"} end - defimpl GRPC.HTTP2.Frame.Serializable do + defimpl GRPC.Transport.HTTP2.Frame.Serializable do @ack_bit 0 - def serialize(%GRPC.HTTP2.Frame.Ping{ack: true} = frame, _max_frame_size), + def serialize(%GRPC.Transport.HTTP2.Frame.Ping{ack: true} = frame, _max_frame_size), do: [{0x6, set([@ack_bit]), 0, frame.payload}] - def serialize(%GRPC.HTTP2.Frame.Ping{ack: false} = frame, _max_frame_size), + def serialize(%GRPC.Transport.HTTP2.Frame.Ping{ack: false} = frame, _max_frame_size), do: [{0x6, 0x0, 0, frame.payload}] end end 
diff --git a/grpc_core/lib/grpc/http2/frame/priority.ex b/grpc_core/lib/grpc/transport/http2/frame/priority.ex similarity index 62% rename from grpc_core/lib/grpc/http2/frame/priority.ex rename to grpc_core/lib/grpc/transport/http2/frame/priority.ex index 73db02c72..b82b9eae8 100644 --- a/grpc_core/lib/grpc/http2/frame/priority.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/priority.ex @@ -1,4 +1,4 @@ -defmodule GRPC.HTTP2.Frame.Priority do +defmodule GRPC.Transport.HTTP2.Frame.Priority do @moduledoc false defstruct stream_id: nil, @@ -8,16 +8,16 @@ defmodule GRPC.HTTP2.Frame.Priority do @typedoc "An HTTP/2 PRIORITY frame" @type t :: %__MODULE__{ - stream_id: GRPC.HTTP2.Stream.stream_id(), + stream_id: GRPC.Transport.HTTP2.Stream.stream_id(), exclusive_dependency: boolean(), - stream_dependency: GRPC.HTTP2.Stream.stream_id(), + stream_dependency: GRPC.Transport.HTTP2.Stream.stream_id(), weight: non_neg_integer() } - @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: - {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do - {:error, GRPC.HTTP2.Errors.protocol_error(), + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "PRIORITY frame with zero stream_id (RFC9113§6.3)"} end @@ -36,12 +36,12 @@ defmodule GRPC.HTTP2.Frame.Priority do end def deserialize(_flags, _stream_id, _payload) do - {:error, GRPC.HTTP2.Errors.frame_size_error(), + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid payload size in PRIORITY frame (RFC9113§6.3)"} end - defimpl GRPC.HTTP2.Frame.Serializable do - def serialize(%GRPC.HTTP2.Frame.Priority{} = frame, _max_frame_size) do + defimpl GRPC.Transport.HTTP2.Frame.Serializable do + def serialize(%GRPC.Transport.HTTP2.Frame.Priority{} = frame, 
_max_frame_size) do exclusive = if frame.exclusive_dependency, do: 0x01, else: 0x00 payload = <<exclusive::1, frame.stream_dependency::31, frame.weight::8>> [{0x2, 0x0, frame.stream_id, payload}] diff --git a/grpc_core/lib/grpc/http2/frame/push_promise.ex b/grpc_core/lib/grpc/transport/http2/frame/push_promise.ex similarity index 70% rename from grpc_core/lib/grpc/http2/frame/push_promise.ex rename to grpc_core/lib/grpc/transport/http2/frame/push_promise.ex index b5a0b681f..b9bbbf062 100644 --- a/grpc_core/lib/grpc/http2/frame/push_promise.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/push_promise.ex @@ -1,7 +1,7 @@ -defmodule GRPC.HTTP2.Frame.PushPromise do +defmodule GRPC.Transport.HTTP2.Frame.PushPromise do @moduledoc false - import GRPC.HTTP2.Frame.Flags + import GRPC.Transport.HTTP2.Frame.Flags defstruct stream_id: nil, end_headers: false, @@ -10,19 +10,19 @@ defmodule GRPC.HTTP2.Frame.PushPromise do @typedoc "An HTTP/2 PUSH_PROMISE frame" @type t :: %__MODULE__{ - stream_id: GRPC.HTTP2.Stream.stream_id(), + stream_id: GRPC.Transport.HTTP2.Stream.stream_id(), end_headers: boolean(), - promised_stream_id: GRPC.HTTP2.Stream.stream_id(), + promised_stream_id: GRPC.Transport.HTTP2.Stream.stream_id(), fragment: iodata() } @end_headers_bit 2 @padding_bit 3 - @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: - {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do - {:error, GRPC.HTTP2.Errors.protocol_error(), + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "PUSH_PROMISE frame with zero stream_id (RFC9113§6.6)"} end @@ -54,14 +54,14 @@ def deserialize(flags, _stream_id, <<_padding_length::8, _rest::binary>>) when set?(flags, @padding_bit) do - {:error, GRPC.HTTP2.Errors.protocol_error(), + {:error,
GRPC.Transport.HTTP2.Errors.protocol_error(), "PUSH_PROMISE frame with invalid padding length (RFC9113§6.6)"} end - defimpl GRPC.HTTP2.Frame.Serializable do + defimpl GRPC.Transport.HTTP2.Frame.Serializable do @end_headers_bit 2 - def serialize(%GRPC.HTTP2.Frame.PushPromise{} = frame, _max_frame_size) do + def serialize(%GRPC.Transport.HTTP2.Frame.PushPromise{} = frame, _max_frame_size) do payload = <<0::1, frame.promised_stream_id::31, frame.fragment::binary>> [{0x5, set([@end_headers_bit]), frame.stream_id, payload}] end diff --git a/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex b/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex new file mode 100644 index 000000000..0be9289e1 --- /dev/null +++ b/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex @@ -0,0 +1,33 @@ +defmodule GRPC.Transport.HTTP2.Frame.RstStream do + @moduledoc false + + defstruct stream_id: nil, error_code: nil + + @typedoc "An HTTP/2 RST_STREAM frame" + @type t :: %__MODULE__{ + stream_id: GRPC.Transport.HTTP2.Stream.stream_id(), + error_code: GRPC.Transport.HTTP2.Errors.error_code() + } + + @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} + def deserialize(_flags, 0, _payload) do + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), + "RST_STREAM frame with zero stream_id (RFC9113§6.4)"} + end + + def deserialize(_flags, stream_id, <<error_code::32>>) do + {:ok, %__MODULE__{stream_id: stream_id, error_code: error_code}} + end + + def deserialize(_flags, _stream_id, _payload) do + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), + "Invalid payload size in RST_STREAM frame (RFC9113§6.4)"} + end + + defimpl GRPC.Transport.HTTP2.Frame.Serializable do + def serialize(%GRPC.Transport.HTTP2.Frame.RstStream{} = frame, _max_frame_size) do + [{0x3, 0x0, frame.stream_id, <<frame.error_code::32>>}] + end + end +end diff --git a/grpc_core/lib/grpc/http2/frame/settings.ex
b/grpc_core/lib/grpc/transport/http2/frame/settings.ex similarity index 71% rename from grpc_core/lib/grpc/http2/frame/settings.ex rename to grpc_core/lib/grpc/transport/http2/frame/settings.ex index dc52f5757..f359ce324 100644 --- a/grpc_core/lib/grpc/http2/frame/settings.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/settings.ex @@ -1,7 +1,7 @@ -defmodule GRPC.HTTP2.Frame.Settings do +defmodule GRPC.Transport.HTTP2.Frame.Settings do @moduledoc false - import GRPC.HTTP2.Frame.Flags + import GRPC.Transport.HTTP2.Frame.Flags import Bitwise @max_window_size (1 <<< 31) - 1 @@ -15,8 +15,8 @@ defmodule GRPC.HTTP2.Frame.Settings do @ack_bit 0 - @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: - {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(flags, 0, payload) when clear?(flags, @ack_bit) do payload |> Stream.unfold(fn @@ -33,25 +33,25 @@ defmodule GRPC.HTTP2.Frame.Settings do {:ok, {0x02, _value}}, {:ok, _acc} -> {:halt, - {:error, GRPC.HTTP2.Errors.protocol_error(), "Invalid enable_push value (RFC9113§6.5)"}} + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "Invalid enable_push value (RFC9113§6.5)"}} {:ok, {0x03, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :max_concurrent_streams, value)}} {:ok, {0x04, value}}, {:ok, _acc} when value > @max_window_size -> {:halt, - {:error, GRPC.HTTP2.Errors.flow_control_error(), "Invalid window_size (RFC9113§6.5)"}} + {:error, GRPC.Transport.HTTP2.Errors.flow_control_error(), "Invalid window_size (RFC9113§6.5)"}} {:ok, {0x04, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :initial_window_size, value)}} {:ok, {0x05, value}}, {:ok, _acc} when value < @min_frame_size -> {:halt, - {:error, GRPC.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} 
+ {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} {:ok, {0x05, value}}, {:ok, _acc} when value > @max_frame_size -> {:halt, - {:error, GRPC.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} {:ok, {0x05, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :max_frame_size, value)}} @@ -64,7 +64,7 @@ defmodule GRPC.HTTP2.Frame.Settings do {:error, _rest}, _acc -> {:halt, - {:error, GRPC.HTTP2.Errors.frame_size_error(), "Invalid SETTINGS size (RFC9113§6.5)"}} + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid SETTINGS size (RFC9113§6.5)"}} end) |> case do {:ok, settings} -> {:ok, %__MODULE__{ack: false, settings: settings}} @@ -77,21 +77,21 @@ defmodule GRPC.HTTP2.Frame.Settings do end def deserialize(flags, 0, _payload) when set?(flags, @ack_bit) do - {:error, GRPC.HTTP2.Errors.frame_size_error(), + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "SETTINGS ack frame with non-empty payload (RFC9113§6.5)"} end def deserialize(_flags, _stream_id, _payload) do - {:error, GRPC.HTTP2.Errors.protocol_error(), "Invalid SETTINGS frame (RFC9113§6.5)"} + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "Invalid SETTINGS frame (RFC9113§6.5)"} end - defimpl GRPC.HTTP2.Frame.Serializable do + defimpl GRPC.Transport.HTTP2.Frame.Serializable do @ack_bit 0 - def serialize(%GRPC.HTTP2.Frame.Settings{ack: true}, _max_frame_size), + def serialize(%GRPC.Transport.HTTP2.Frame.Settings{ack: true}, _max_frame_size), do: [{0x4, set([@ack_bit]), 0, <<>>}] - def serialize(%GRPC.HTTP2.Frame.Settings{ack: false} = frame, _max_frame_size) do + def serialize(%GRPC.Transport.HTTP2.Frame.Settings{ack: false} = frame, _max_frame_size) do payload = frame.settings |> Enum.uniq_by(fn {setting, _} -> setting end) diff --git a/grpc_core/lib/grpc/http2/frame/unknown.ex 
b/grpc_core/lib/grpc/transport/http2/frame/unknown.ex similarity index 56% rename from grpc_core/lib/grpc/http2/frame/unknown.ex rename to grpc_core/lib/grpc/transport/http2/frame/unknown.ex index c6f36f19e..308d89061 100644 --- a/grpc_core/lib/grpc/http2/frame/unknown.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/unknown.ex @@ -1,4 +1,4 @@ -defmodule GRPC.HTTP2.Frame.Unknown do +defmodule GRPC.Transport.HTTP2.Frame.Unknown do @moduledoc false defstruct type: nil, @@ -8,17 +8,17 @@ defmodule GRPC.HTTP2.Frame.Unknown do @typedoc "An HTTP/2 frame of unknown type" @type t :: %__MODULE__{ - type: GRPC.HTTP2.Frame.frame_type(), - flags: GRPC.HTTP2.Frame.flags(), - stream_id: GRPC.HTTP2.Stream.stream_id(), + type: GRPC.Transport.HTTP2.Frame.frame_type(), + flags: GRPC.Transport.HTTP2.Frame.flags(), + stream_id: GRPC.Transport.HTTP2.Stream.stream_id(), payload: iodata() } # Note this is arity 4 @spec deserialize( - GRPC.HTTP2.Frame.frame_type(), - GRPC.HTTP2.Frame.flags(), - GRPC.HTTP2.Stream.stream_id(), + GRPC.Transport.HTTP2.Frame.frame_type(), + GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), iodata() ) :: {:ok, t()} def deserialize(type, flags, stream_id, payload) do diff --git a/grpc_core/lib/grpc/http2/frame/window_update.ex b/grpc_core/lib/grpc/transport/http2/frame/window_update.ex similarity index 53% rename from grpc_core/lib/grpc/http2/frame/window_update.ex rename to grpc_core/lib/grpc/transport/http2/frame/window_update.ex index ac8adcfb5..cf9924fdf 100644 --- a/grpc_core/lib/grpc/http2/frame/window_update.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/window_update.ex @@ -1,4 +1,4 @@ -defmodule GRPC.HTTP2.Frame.WindowUpdate do +defmodule GRPC.Transport.HTTP2.Frame.WindowUpdate do @moduledoc false import Bitwise @@ -7,25 +7,25 @@ defmodule GRPC.HTTP2.Frame.WindowUpdate do @typedoc "An HTTP/2 WINDOW_UPDATE frame" @type t :: %__MODULE__{ - stream_id: GRPC.HTTP2.Stream.stream_id(), + stream_id: 
GRPC.Transport.HTTP2.Stream.stream_id(), size_increment: non_neg_integer() } @max_window_increment (1 <<< 31) - 1 - @spec deserialize(GRPC.HTTP2.Frame.flags(), GRPC.HTTP2.Stream.stream_id(), iodata()) :: - {:ok, t()} | {:error, GRPC.HTTP2.Errors.error_code(), binary()} + @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, stream_id, <<_reserved::1, size_increment::31>>) when size_increment > 0 and size_increment <= @max_window_increment do {:ok, %__MODULE__{stream_id: stream_id, size_increment: size_increment}} end def deserialize(_flags, _stream_id, _payload) do - {:error, GRPC.HTTP2.Errors.frame_size_error(), "Invalid WINDOW_UPDATE frame (RFC9113§6.9)"} + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid WINDOW_UPDATE frame (RFC9113§6.9)"} end - defimpl GRPC.HTTP2.Frame.Serializable do - def serialize(%GRPC.HTTP2.Frame.WindowUpdate{} = frame, _max_frame_size) do + defimpl GRPC.Transport.HTTP2.Frame.Serializable do + def serialize(%GRPC.Transport.HTTP2.Frame.WindowUpdate{} = frame, _max_frame_size) do [{0x8, 0x0, frame.stream_id, <<0::1, frame.size_increment::31>>}] end end diff --git a/grpc_core/lib/grpc/http2/settings.ex b/grpc_core/lib/grpc/transport/http2/settings.ex similarity index 93% rename from grpc_core/lib/grpc/http2/settings.ex rename to grpc_core/lib/grpc/transport/http2/settings.ex index b0c015672..cf03b6baf 100644 --- a/grpc_core/lib/grpc/http2/settings.ex +++ b/grpc_core/lib/grpc/transport/http2/settings.ex @@ -1,4 +1,4 @@ -defmodule GRPC.HTTP2.Settings do +defmodule GRPC.Transport.HTTP2.Settings do @moduledoc """ Settings as defined in RFC9113§6.5.2 """ diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex index 008c78480..c4012546f 100644 --- 
a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex @@ -7,8 +7,8 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do use ThousandIsland.Handler alias GRPC.Server.HTTP2.Connection - alias GRPC.HTTP2.Frame - alias GRPC.HTTP2.Errors + alias GRPC.Transport.HTTP2.Frame + alias GRPC.Transport.HTTP2.Errors require Logger # HTTP/2 connection preface per RFC9113§3.4 diff --git a/grpc_server/lib/grpc/server/http2/connection.ex b/grpc_server/lib/grpc/server/http2/connection.ex index 62affeb56..392b3e9fe 100644 --- a/grpc_server/lib/grpc/server/http2/connection.ex +++ b/grpc_server/lib/grpc/server/http2/connection.ex @@ -4,9 +4,9 @@ defmodule GRPC.Server.HTTP2.Connection do """ require Logger - alias GRPC.HTTP2.Frame - alias GRPC.HTTP2.Settings - alias GRPC.HTTP2.Errors + alias GRPC.Transport.HTTP2.Frame + alias GRPC.Transport.HTTP2.Settings + alias GRPC.Transport.HTTP2.Errors alias GRPC.Server.HTTP2.StreamState # Inline hot path functions for performance diff --git a/grpc_server/lib/grpc/server/http2/stream_state.ex b/grpc_server/lib/grpc/server/http2/stream_state.ex index 11673238e..78d8b45d0 100644 --- a/grpc_server/lib/grpc/server/http2/stream_state.ex +++ b/grpc_server/lib/grpc/server/http2/stream_state.ex @@ -9,7 +9,7 @@ defmodule GRPC.Server.HTTP2.StreamState do - Manages stream lifecycle (HEADERS -> DATA -> trailers) """ - alias GRPC.HTTP2.Frame + alias GRPC.Transport.HTTP2.Frame @type stream_id :: pos_integer() @type state :: :idle | :open | :half_closed_local | :half_closed_remote | :closed @@ -246,7 +246,7 @@ defmodule GRPC.Server.HTTP2.StreamState do """ @spec update_window(t(), integer()) :: {:ok, t()} | {:error, :flow_control_error} def update_window(stream, increment) do - case GRPC.HTTP2.FlowControl.update_window(stream.window_size, increment) do + case GRPC.Transport.HTTP2.FlowControl.update_window(stream.window_size, increment) do {:ok, new_size} -> {:ok, %{stream | 
window_size: new_size}} diff --git a/grpc_server/test/grpc/server/http2/connection_test.exs b/grpc_server/test/grpc/server/http2/connection_test.exs index 1cd3a4b95..3ecae0437 100644 --- a/grpc_server/test/grpc/server/http2/connection_test.exs +++ b/grpc_server/test/grpc/server/http2/connection_test.exs @@ -2,7 +2,7 @@ defmodule GRPC.Server.HTTP2.ConnectionTest do use ExUnit.Case, async: true alias GRPC.Server.HTTP2.Connection - alias GRPC.HTTP2.{Frame, Settings, Errors} + alias GRPC.Transport.HTTP2.{Frame, Settings, Errors} # For now, we'll test without mocking the socket # Just test the logic of handle_frame functions diff --git a/grpc_server/test/grpc/server/http2/errors_test.exs b/grpc_server/test/grpc/server/http2/errors_test.exs index 208e7242e..c1e323875 100644 --- a/grpc_server/test/grpc/server/http2/errors_test.exs +++ b/grpc_server/test/grpc/server/http2/errors_test.exs @@ -1,7 +1,7 @@ -defmodule GRPC.HTTP2.ErrorsTest do +defmodule GRPC.Transport.HTTP2.ErrorsTest do use ExUnit.Case, async: true - alias GRPC.HTTP2.Errors + alias GRPC.Transport.HTTP2.Errors describe "error codes" do test "no_error returns 0x0" do diff --git a/grpc_server/test/grpc/server/http2/flow_control_test.exs b/grpc_server/test/grpc/server/http2/flow_control_test.exs index 6b4fa997a..b78885b7f 100644 --- a/grpc_server/test/grpc/server/http2/flow_control_test.exs +++ b/grpc_server/test/grpc/server/http2/flow_control_test.exs @@ -1,7 +1,7 @@ -defmodule GRPC.HTTP2.FlowControlTest do +defmodule GRPC.Transport.HTTP2.FlowControlTest do use ExUnit.Case, async: true - alias GRPC.HTTP2.FlowControl + alias GRPC.Transport.HTTP2.FlowControl import Bitwise diff --git a/grpc_server/test/grpc/server/http2/frame_test.exs b/grpc_server/test/grpc/server/http2/frame_test.exs index a2b61df77..f5d01b709 100644 --- a/grpc_server/test/grpc/server/http2/frame_test.exs +++ b/grpc_server/test/grpc/server/http2/frame_test.exs @@ -1,7 +1,7 @@ -defmodule GRPC.HTTP2.FrameTest do +defmodule 
GRPC.Transport.HTTP2.FrameTest do use ExUnit.Case, async: true - alias GRPC.HTTP2.{Frame, Errors} + alias GRPC.Transport.HTTP2.{Frame, Errors} describe "frame deserialization" do test "deserializes DATA frames" do diff --git a/grpc_server/test/grpc/server/http2/settings_test.exs b/grpc_server/test/grpc/server/http2/settings_test.exs index 8d25105aa..c58f37243 100644 --- a/grpc_server/test/grpc/server/http2/settings_test.exs +++ b/grpc_server/test/grpc/server/http2/settings_test.exs @@ -1,7 +1,7 @@ -defmodule GRPC.HTTP2.SettingsTest do +defmodule GRPC.Transport.HTTP2.SettingsTest do use ExUnit.Case, async: true - alias GRPC.HTTP2.Settings + alias GRPC.Transport.HTTP2.Settings describe "default settings" do test "has correct default values" do From 2b5979d47ad10954d836410d69ee10e0d395849b Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Wed, 10 Dec 2025 18:19:25 -0300 Subject: [PATCH 10/47] formatting --- grpc_core/lib/grpc/transport/http2/frame.ex | 3 ++- .../transport/http2/frame/continuation.ex | 11 ++++++++-- .../lib/grpc/transport/http2/frame/data.ex | 9 ++++++-- .../lib/grpc/transport/http2/frame/goaway.ex | 6 +++++- .../lib/grpc/transport/http2/frame/headers.ex | 11 ++++++++-- .../lib/grpc/transport/http2/frame/ping.ex | 9 ++++++-- .../grpc/transport/http2/frame/priority.ex | 6 +++++- .../transport/http2/frame/push_promise.ex | 6 +++++- .../grpc/transport/http2/frame/rst_stream.ex | 6 +++++- .../grpc/transport/http2/frame/settings.ex | 21 +++++++++++++------ .../transport/http2/frame/window_update.ex | 9 ++++++-- 11 files changed, 76 insertions(+), 21 deletions(-) diff --git a/grpc_core/lib/grpc/transport/http2/frame.ex b/grpc_core/lib/grpc/transport/http2/frame.ex index 5a348fb03..a93a3ba91 100644 --- a/grpc_core/lib/grpc/transport/http2/frame.ex +++ b/grpc_core/lib/grpc/transport/http2/frame.ex @@ -57,7 +57,8 @@ defmodule GRPC.Transport.HTTP2.Frame do max_frame_size ) when length > max_frame_size do - {{:error, 
GRPC.Transport.HTTP2.Errors.frame_size_error(), "Payload size too large (RFC9113§4.2)"}, rest} + {{:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), + "Payload size too large (RFC9113§4.2)"}, rest} end # nil is used to indicate for Stream.unfold/2 that the frame deserialization is finished diff --git a/grpc_core/lib/grpc/transport/http2/frame/continuation.ex b/grpc_core/lib/grpc/transport/http2/frame/continuation.ex index 6db0c11dd..6c5195e45 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/continuation.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/continuation.ex @@ -16,7 +16,11 @@ defmodule GRPC.Transport.HTTP2.Frame.Continuation do @end_headers_bit 2 - @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + @spec deserialize( + GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), @@ -47,7 +51,10 @@ defmodule GRPC.Transport.HTTP2.Frame.Continuation do [ {0x9, 0x00, frame.stream_id, this_frame} | GRPC.Transport.HTTP2.Frame.Serializable.serialize( - %GRPC.Transport.HTTP2.Frame.Continuation{stream_id: frame.stream_id, fragment: rest}, + %GRPC.Transport.HTTP2.Frame.Continuation{ + stream_id: frame.stream_id, + fragment: rest + }, max_frame_size ) ] diff --git a/grpc_core/lib/grpc/transport/http2/frame/data.ex b/grpc_core/lib/grpc/transport/http2/frame/data.ex index 9f7b6e369..78cd5a04f 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/data.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/data.ex @@ -17,10 +17,15 @@ defmodule GRPC.Transport.HTTP2.Frame.Data do @end_stream_bit 0 @padding_bit 3 - @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + @spec deserialize( + GRPC.Transport.HTTP2.Frame.flags(), + 
GRPC.Transport.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do - {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "DATA frame with zero stream_id (RFC9113§6.1)"} + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), + "DATA frame with zero stream_id (RFC9113§6.1)"} end def deserialize(flags, stream_id, <>) diff --git a/grpc_core/lib/grpc/transport/http2/frame/goaway.ex b/grpc_core/lib/grpc/transport/http2/frame/goaway.ex index c595d6d98..21aa9dc71 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/goaway.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/goaway.ex @@ -10,7 +10,11 @@ defmodule GRPC.Transport.HTTP2.Frame.Goaway do debug_data: iodata() } - @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + @spec deserialize( + GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize( _flags, diff --git a/grpc_core/lib/grpc/transport/http2/frame/headers.ex b/grpc_core/lib/grpc/transport/http2/frame/headers.ex index d6f160c31..3cd664d40 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/headers.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/headers.ex @@ -27,7 +27,11 @@ defmodule GRPC.Transport.HTTP2.Frame.Headers do @padding_bit 3 @priority_bit 5 - @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + @spec deserialize( + GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), @@ -130,7 +134,10 @@ defmodule GRPC.Transport.HTTP2.Frame.Headers do [ {0x1, set(flags), frame.stream_id, this_frame} | 
GRPC.Transport.HTTP2.Frame.Serializable.serialize( - %GRPC.Transport.HTTP2.Frame.Continuation{stream_id: frame.stream_id, fragment: rest}, + %GRPC.Transport.HTTP2.Frame.Continuation{ + stream_id: frame.stream_id, + fragment: rest + }, max_frame_size ) ] diff --git a/grpc_core/lib/grpc/transport/http2/frame/ping.ex b/grpc_core/lib/grpc/transport/http2/frame/ping.ex index d0a58c765..b238c7fdd 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/ping.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/ping.ex @@ -13,7 +13,11 @@ defmodule GRPC.Transport.HTTP2.Frame.Ping do @ack_bit 0 - @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + @spec deserialize( + GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(flags, 0, <<payload::binary-size(8)>>) when set?(flags, @ack_bit) do {:ok, %__MODULE__{ack: true, payload: payload}} @@ -24,7 +28,8 @@ defmodule GRPC.Transport.HTTP2.Frame.Ping do end def deserialize(_flags, stream_id, _payload) when stream_id != 0 do - {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "Invalid stream ID in PING frame (RFC9113§6.7)"} + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), + "Invalid stream ID in PING frame (RFC9113§6.7)"} end def deserialize(_flags, _stream_id, _payload) do diff --git a/grpc_core/lib/grpc/transport/http2/frame/priority.ex b/grpc_core/lib/grpc/transport/http2/frame/priority.ex index b82b9eae8..c66e835ea 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/priority.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/priority.ex @@ -14,7 +14,11 @@ defmodule GRPC.Transport.HTTP2.Frame.Priority do weight: non_neg_integer() } - @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + @spec deserialize( + GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), + iodata() + ) ::
{:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), diff --git a/grpc_core/lib/grpc/transport/http2/frame/push_promise.ex b/grpc_core/lib/grpc/transport/http2/frame/push_promise.ex index b9bbbf062..ade51ec22 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/push_promise.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/push_promise.ex @@ -19,7 +19,11 @@ defmodule GRPC.Transport.HTTP2.Frame.PushPromise do @end_headers_bit 2 @padding_bit 3 - @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + @spec deserialize( + GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), diff --git a/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex b/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex index 0be9289e1..b9c9cede5 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex @@ -9,7 +9,11 @@ defmodule GRPC.Transport.HTTP2.Frame.RstStream do error_code: GRPC.Transport.HTTP2.Errors.error_code() } - @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + @spec deserialize( + GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, 0, _payload) do {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), diff --git a/grpc_core/lib/grpc/transport/http2/frame/settings.ex b/grpc_core/lib/grpc/transport/http2/frame/settings.ex index f359ce324..41d7415a6 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/settings.ex +++ 
b/grpc_core/lib/grpc/transport/http2/frame/settings.ex @@ -15,7 +15,11 @@ defmodule GRPC.Transport.HTTP2.Frame.Settings do @ack_bit 0 - @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + @spec deserialize( + GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(flags, 0, payload) when clear?(flags, @ack_bit) do payload @@ -33,25 +37,29 @@ defmodule GRPC.Transport.HTTP2.Frame.Settings do {:ok, {0x02, _value}}, {:ok, _acc} -> {:halt, - {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "Invalid enable_push value (RFC9113§6.5)"}} + {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), + "Invalid enable_push value (RFC9113§6.5)"}} {:ok, {0x03, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :max_concurrent_streams, value)}} {:ok, {0x04, value}}, {:ok, _acc} when value > @max_window_size -> {:halt, - {:error, GRPC.Transport.HTTP2.Errors.flow_control_error(), "Invalid window_size (RFC9113§6.5)"}} + {:error, GRPC.Transport.HTTP2.Errors.flow_control_error(), + "Invalid window_size (RFC9113§6.5)"}} {:ok, {0x04, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :initial_window_size, value)}} {:ok, {0x05, value}}, {:ok, _acc} when value < @min_frame_size -> {:halt, - {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), + "Invalid max_frame_size (RFC9113§6.5)"}} {:ok, {0x05, value}}, {:ok, _acc} when value > @max_frame_size -> {:halt, - {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), + "Invalid max_frame_size (RFC9113§6.5)"}} {:ok, {0x05, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :max_frame_size, value)}} @@ -64,7 +72,8 @@ defmodule 
GRPC.Transport.HTTP2.Frame.Settings do {:error, _rest}, _acc -> {:halt, - {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid SETTINGS size (RFC9113§6.5)"}} + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), + "Invalid SETTINGS size (RFC9113§6.5)"}} end) |> case do {:ok, settings} -> {:ok, %__MODULE__{ack: false, settings: settings}} diff --git a/grpc_core/lib/grpc/transport/http2/frame/window_update.ex b/grpc_core/lib/grpc/transport/http2/frame/window_update.ex index cf9924fdf..b24810e1e 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/window_update.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/window_update.ex @@ -13,7 +13,11 @@ defmodule GRPC.Transport.HTTP2.Frame.WindowUpdate do @max_window_increment (1 <<< 31) - 1 - @spec deserialize(GRPC.Transport.HTTP2.Frame.flags(), GRPC.Transport.HTTP2.Stream.stream_id(), iodata()) :: + @spec deserialize( + GRPC.Transport.HTTP2.Frame.flags(), + GRPC.Transport.HTTP2.Stream.stream_id(), + iodata() + ) :: {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} def deserialize(_flags, stream_id, <<_reserved::1, size_increment::31>>) when size_increment > 0 and size_increment <= @max_window_increment do @@ -21,7 +25,8 @@ defmodule GRPC.Transport.HTTP2.Frame.WindowUpdate do end def deserialize(_flags, _stream_id, _payload) do - {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid WINDOW_UPDATE frame (RFC9113§6.9)"} + {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), + "Invalid WINDOW_UPDATE frame (RFC9113§6.9)"} end defimpl GRPC.Transport.HTTP2.Frame.Serializable do From bfb5325baae7ff0d42d1062bd159462eaef89eb4 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Wed, 10 Dec 2025 18:28:31 -0300 Subject: [PATCH 11/47] test: add comprehensive HTTP/2 test coverage for grpc_core --- .../test/grpc/transport/http2/errors_test.exs | 111 ++++++++ .../transport/http2/flow_control_test.exs | 134 +++++++++ .../test/grpc/transport/http2/frame_test.exs | 268 
++++++++++++++++++ 3 files changed, 513 insertions(+) create mode 100644 grpc_core/test/grpc/transport/http2/errors_test.exs create mode 100644 grpc_core/test/grpc/transport/http2/flow_control_test.exs create mode 100644 grpc_core/test/grpc/transport/http2/frame_test.exs diff --git a/grpc_core/test/grpc/transport/http2/errors_test.exs b/grpc_core/test/grpc/transport/http2/errors_test.exs new file mode 100644 index 000000000..3f771b588 --- /dev/null +++ b/grpc_core/test/grpc/transport/http2/errors_test.exs @@ -0,0 +1,111 @@ +defmodule GRPC.Transport.HTTP2.ErrorsTest do + use ExUnit.Case, async: true + + alias GRPC.Transport.HTTP2.Errors + + describe "error codes" do + test "returns correct code for no_error" do + assert Errors.no_error() == 0x0 + end + + test "returns correct code for protocol_error" do + assert Errors.protocol_error() == 0x1 + end + + test "returns correct code for internal_error" do + assert Errors.internal_error() == 0x2 + end + + test "returns correct code for flow_control_error" do + assert Errors.flow_control_error() == 0x3 + end + + test "returns correct code for settings_timeout" do + assert Errors.settings_timeout() == 0x4 + end + + test "returns correct code for stream_closed" do + assert Errors.stream_closed() == 0x5 + end + + test "returns correct code for frame_size_error" do + assert Errors.frame_size_error() == 0x6 + end + + test "returns correct code for refused_stream" do + assert Errors.refused_stream() == 0x7 + end + + test "returns correct code for cancel" do + assert Errors.cancel() == 0x8 + end + + test "returns correct code for compression_error" do + assert Errors.compression_error() == 0x9 + end + + test "returns correct code for connect_error" do + assert Errors.connect_error() == 0xA + end + + test "returns correct code for enhance_your_calm" do + assert Errors.enhance_your_calm() == 0xB + end + + test "returns correct code for inadequate_security" do + assert Errors.inadequate_security() == 0xC + end + + test "returns 
correct code for http_1_1_requires" do + assert Errors.http_1_1_requires() == 0xD + end + end + + describe "ConnectionError exception" do + test "can be raised with message" do + assert_raise Errors.ConnectionError, "test error", fn -> + raise Errors.ConnectionError, message: "test error" + end + end + + test "can be raised with error code" do + exception = %Errors.ConnectionError{message: "error", error_code: 0x1} + assert exception.error_code == 0x1 + end + + test "can be raised with both message and error code" do + exception = %Errors.ConnectionError{ + message: "protocol violation", + error_code: Errors.protocol_error() + } + + assert exception.message == "protocol violation" + assert exception.error_code == 0x1 + end + end + + describe "StreamError exception" do + test "can be raised with message" do + assert_raise Errors.StreamError, "stream error", fn -> + raise Errors.StreamError, message: "stream error" + end + end + + test "can be raised with stream_id" do + exception = %Errors.StreamError{message: "error", stream_id: 1} + assert exception.stream_id == 1 + end + + test "can be raised with all fields" do + exception = %Errors.StreamError{ + message: "stream closed", + error_code: Errors.stream_closed(), + stream_id: 3 + } + + assert exception.message == "stream closed" + assert exception.error_code == 0x5 + assert exception.stream_id == 3 + end + end +end diff --git a/grpc_core/test/grpc/transport/http2/flow_control_test.exs b/grpc_core/test/grpc/transport/http2/flow_control_test.exs new file mode 100644 index 000000000..923f60d90 --- /dev/null +++ b/grpc_core/test/grpc/transport/http2/flow_control_test.exs @@ -0,0 +1,134 @@ +defmodule GRPC.Transport.HTTP2.FlowControlTest do + use ExUnit.Case, async: true + + import Bitwise + alias GRPC.Transport.HTTP2.FlowControl + + describe "compute_recv_window/2" do + test "returns correct window size when above minimum threshold" do + # Start with a large window above the minimum threshold (1GB) + large_window = (1 
<<< 30) + 1_000_000 + # Receive small amount of data + {new_window, increment} = FlowControl.compute_recv_window(large_window, 1_024) + + # Should still be above threshold, no increment needed + assert new_window == large_window - 1_024 + assert increment == 0 + end + + test "returns window increment when below minimum threshold" do + # Start with minimum threshold + 1 (1GB + 1) + min_threshold = 1 <<< 30 + # Receive enough data to go below threshold + data_size = 2 + + {new_window, increment} = FlowControl.compute_recv_window(min_threshold + 1, data_size) + + # Should have sent a WINDOW_UPDATE + assert increment > 0 + # New window should be original - data + increment + assert new_window == min_threshold + 1 - data_size + increment + end + + test "respects maximum window increment" do + # Maximum increment is 2^31 - 1 + max_increment = (1 <<< 31) - 1 + + # Start with a very small window + {new_window, increment} = FlowControl.compute_recv_window(1, 0) + + # Increment should not exceed maximum + assert increment <= max_increment + assert new_window == 1 + increment + end + + test "respects maximum window size" do + # Maximum window size is 2^31 - 1 + max_window = (1 <<< 31) - 1 + + # Start with a small window that would overflow + {new_window, _increment} = FlowControl.compute_recv_window(1, 0) + + # New window should not exceed maximum + assert new_window <= max_window + end + + test "handles zero data size" do + initial_window = 65_535 + {new_window, increment} = FlowControl.compute_recv_window(initial_window, 0) + + # Window might change if below threshold (function computes new window) + # Just verify it's reasonable + assert new_window >= initial_window + assert increment >= 0 + end + + test "handles large data sizes" do + initial_window = 1 <<< 30 + # Receive 100MB of data + data_size = 100 * 1024 * 1024 + + {new_window, increment} = FlowControl.compute_recv_window(initial_window, data_size) + + # Window should be decreased by data size, then potentially 
increased by increment + # Just verify the window is reasonable (non-negative and less than max) + assert new_window >= 0 + assert new_window <= (1 <<< 31) - 1 + end + end + + describe "update_window/2" do + test "updates window size with positive increment" do + assert {:ok, 100} = FlowControl.update_window(50, 50) + end + + test "updates window size with negative increment" do + assert {:ok, 50} = FlowControl.update_window(100, -50) + end + + test "allows window size of zero" do + assert {:ok, 0} = FlowControl.update_window(50, -50) + end + + test "returns error when window size would exceed maximum" do + max_window = (1 <<< 31) - 1 + + assert {:error, :flow_control_error} = + FlowControl.update_window(max_window, 1) + end + + test "returns error when increment causes overflow" do + # Window size is at max, any positive increment should fail + max_window = (1 <<< 31) - 1 + + assert {:error, :flow_control_error} = + FlowControl.update_window(max_window, 100) + end + + test "allows update to exactly maximum window size" do + max_window = (1 <<< 31) - 1 + + assert {:ok, ^max_window} = FlowControl.update_window(max_window - 100, 100) + end + + test "handles large positive increments" do + large_increment = 1_000_000 + + assert {:ok, 1_000_050} = FlowControl.update_window(50, large_increment) + end + + test "handles large negative increments" do + large_decrement = -1_000_000 + + assert {:ok, 0} = FlowControl.update_window(1_000_000, large_decrement) + end + + test "RFC 9113 compliance - maximum window size is 2^31-1" do + # RFC 9113 §6.9.1 + max_window = (1 <<< 31) - 1 + + assert {:ok, ^max_window} = FlowControl.update_window(0, max_window) + assert {:error, :flow_control_error} = FlowControl.update_window(max_window, 1) + end + end +end diff --git a/grpc_core/test/grpc/transport/http2/frame_test.exs b/grpc_core/test/grpc/transport/http2/frame_test.exs new file mode 100644 index 000000000..97075b74b --- /dev/null +++ 
b/grpc_core/test/grpc/transport/http2/frame_test.exs @@ -0,0 +1,268 @@ +defmodule GRPC.Transport.HTTP2.FrameTest do + use ExUnit.Case, async: true + + alias GRPC.Transport.HTTP2.Frame + alias GRPC.Transport.HTTP2.Errors + + describe "deserialize/2" do + test "deserializes DATA frame (type 0x0)" do + # Frame: length=3, type=0, flags=0, stream_id=1, payload="abc" + data = <<3::24, 0::8, 0::8, 0::1, 1::31, "abc">> + + assert {{:ok, %Frame.Data{stream_id: 1, data: "abc"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes HEADERS frame (type 0x1)" do + # Frame: length=3, type=1, flags=4 (END_HEADERS), stream_id=1 + data = <<3::24, 1::8, 4::8, 0::1, 1::31, "hdr">> + + assert {{:ok, %Frame.Headers{stream_id: 1, end_headers: true, fragment: "hdr"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes SETTINGS frame (type 0x4)" do + # Frame: length=0, type=4, flags=0, stream_id=0 (empty SETTINGS) + data = <<0::24, 4::8, 0::8, 0::1, 0::31>> + + assert {{:ok, %Frame.Settings{ack: false}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes PING frame (type 0x6)" do + # Frame: length=8, type=6, flags=0, stream_id=0, payload=8 bytes + payload = <<1, 2, 3, 4, 5, 6, 7, 8>> + data = <<8::24, 6::8, 0::8, 0::1, 0::31, payload::binary>> + + assert {{:ok, %Frame.Ping{ack: false, payload: ^payload}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes GOAWAY frame (type 0x7)" do + # Frame: length=8, type=7, flags=0, stream_id=0 + data = <<8::24, 7::8, 0::8, 0::1, 0::31, 0::1, 3::31, 0::32>> + + assert {{:ok, %Frame.Goaway{last_stream_id: 3, error_code: 0}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes WINDOW_UPDATE frame (type 0x8)" do + # Frame: length=4, type=8, flags=0, stream_id=1 + data = <<4::24, 8::8, 0::8, 0::1, 1::31, 0::1, 1000::31>> + + assert {{:ok, %Frame.WindowUpdate{stream_id: 1, size_increment: 1000}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test 
"deserializes unknown frame type" do + # Frame: length=3, type=99 (unknown), flags=0, stream_id=1 + data = <<3::24, 99::8, 0::8, 0::1, 1::31, "xyz">> + + assert {{:ok, %Frame.Unknown{type: 99, stream_id: 1, payload: "xyz"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "returns error when payload exceeds max_frame_size" do + # Frame with length=17000 but max_frame_size=16384 + data = <<17000::24, 0::8, 0::8, 0::1, 1::31, "data">> + + assert {{:error, error_code, "Payload size too large (RFC9113§4.2)"}, _rest} = + Frame.deserialize(data, 16_384) + + assert error_code == Errors.frame_size_error() + end + + test "returns {:more, buffer} when frame is incomplete" do + # Partial frame (only header, no payload) + data = <<10::24, 0::8, 0::8, 0::1, 1::31>> + + assert {{:more, ^data}, <<>>} = Frame.deserialize(data, 16_384) + end + + test "returns nil for empty buffer" do + assert Frame.deserialize(<<>>, 16_384) == nil + end + + test "handles multiple frames in buffer" do + # Two DATA frames back-to-back + frame1 = <<3::24, 0::8, 0::8, 0::1, 1::31, "abc">> + frame2 = <<3::24, 0::8, 0::8, 0::1, 2::31, "def">> + data = frame1 <> frame2 + + assert {{:ok, %Frame.Data{stream_id: 1, data: "abc"}}, rest} = + Frame.deserialize(data, 16_384) + + assert {{:ok, %Frame.Data{stream_id: 2, data: "def"}}, <<>>} = + Frame.deserialize(rest, 16_384) + end + + test "preserves remaining data after deserialization" do + frame = <<3::24, 0::8, 0::8, 0::1, 1::31, "abc">> + extra = <<1, 2, 3, 4, 5>> + data = frame <> extra + + assert {{:ok, %Frame.Data{}}, ^extra} = Frame.deserialize(data, 16_384) + end + end + + describe "serialize/2" do + test "serializes DATA frame" do + frame = %Frame.Data{stream_id: 1, end_stream: false, data: "hello"} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + # Check frame header: length=5, type=0, flags=0, stream_id=1 + <> = binary + + assert length == 5 + assert type == 0 + assert stream_id == 1 + assert payload == 
"hello" + end + + test "serializes SETTINGS frame" do + frame = %Frame.Settings{ack: false, settings: []} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + # Check frame header: length=0, type=4, stream_id=0 + <> = binary + + assert length == 0 + assert type == 4 + assert stream_id == 0 + end + + test "serializes PING frame" do + payload = <<1, 2, 3, 4, 5, 6, 7, 8>> + frame = %Frame.Ping{ack: false, payload: payload} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + # Check frame header: length=8, type=6, stream_id=0 + <> = binary + + assert length == 8 + assert type == 6 + assert stream_id == 0 + assert data == payload + end + + test "handles iodata efficiently" do + # Frame.serialize should return iodata (list), not binary + frame = %Frame.Data{stream_id: 1, data: "test"} + + result = Frame.serialize(frame, 16_384) + + # Result should be iodata (list) + assert is_list(result) + + # But should convert to valid binary + binary = IO.iodata_to_binary(result) + assert is_binary(binary) + end + end + + describe "Frame.Flags" do + test "set/1 returns 0 for empty list" do + assert Frame.Flags.set([]) == 0x0 + end + + test "set/1 sets single bit" do + assert Frame.Flags.set([0]) == 0b00000001 + assert Frame.Flags.set([1]) == 0b00000010 + assert Frame.Flags.set([2]) == 0b00000100 + assert Frame.Flags.set([7]) == 0b10000000 + end + + test "set/1 sets multiple bits" do + assert Frame.Flags.set([0, 2]) == 0b00000101 + assert Frame.Flags.set([0, 1, 2, 3]) == 0b00001111 + end + + test "set?/2 guard works correctly" do + require Frame.Flags + + flags = 0b00000101 # bits 0 and 2 set + + assert Frame.Flags.set?(flags, 0) + refute Frame.Flags.set?(flags, 1) + assert Frame.Flags.set?(flags, 2) + refute Frame.Flags.set?(flags, 3) + end + + test "clear?/2 guard works correctly" do + require Frame.Flags + + flags = 0b00000101 # bits 0 and 2 set + + refute Frame.Flags.clear?(flags, 0) + assert 
Frame.Flags.clear?(flags, 1) + refute Frame.Flags.clear?(flags, 2) + assert Frame.Flags.clear?(flags, 3) + end + end + + describe "RFC 9113 compliance" do + test "default max_frame_size is 16,384 bytes" do + # RFC 9113 §4.2 + max_frame_size = 16_384 + + # Should accept frame at max size + data = <> + assert {{:ok, _frame}, <<>>} = Frame.deserialize(data, max_frame_size) + end + + test "reserved bit must be 0" do + # RFC 9113 §4.1 - Reserved bit (R) must be unset + # Frame with reserved bit = 0 + data = <<3::24, 0::8, 0::8, 0::1, 1::31, "abc">> + assert {{:ok, _frame}, <<>>} = Frame.deserialize(data, 16_384) + end + + test "frame types 0-9 are defined" do + # RFC 9113 §6 - Frame types + # DATA=0, HEADERS=1, PRIORITY=2, RST_STREAM=3, SETTINGS=4, + # PUSH_PROMISE=5, PING=6, GOAWAY=7, WINDOW_UPDATE=8, CONTINUATION=9 + + # Type 0 (DATA) requires stream_id > 0, so we test with stream_id=1 + data = <<0::24, 0::8, 0::8, 0::1, 1::31>> + assert {{:ok, frame}, _} = Frame.deserialize(data, 16_384) + refute match?(%Frame.Unknown{}, frame) + + # Types 1-9 can be tested with stream_id=0 (except some need valid payload) + for type <- 1..9 do + # Use appropriate payloads for each type + {stream_id, payload} = case type do + 1 -> {1, <<0, 0, 0, 0>>} # HEADERS needs priority data + 2 -> {1, <<0, 0, 0, 0, 0>>} # PRIORITY needs 5 bytes + 3 -> {1, <<0, 0, 0, 0>>} # RST_STREAM needs 4 bytes + 4 -> {0, <<>>} # SETTINGS + 5 -> {1, <<0, 0, 0, 0>>} # PUSH_PROMISE needs promised stream id + 6 -> {0, <<0, 0, 0, 0, 0, 0, 0, 0>>} # PING needs 8 bytes + 7 -> {0, <<0, 0, 0, 0, 0, 0, 0, 0>>} # GOAWAY needs 8 bytes + 8 -> {1, <<0, 0, 0, 100>>} # WINDOW_UPDATE needs 4 bytes + 9 -> {1, <<>>} # CONTINUATION + end + + length = byte_size(payload) + data = <> + result = Frame.deserialize(data, 16_384) + + # Should not return Unknown frame for types 0-9 + assert {{:ok, frame}, _} = result + refute match?(%Frame.Unknown{}, frame), "Type #{type} returned Unknown frame" + end + end + + test "unknown frame 
types are ignored gracefully" do + # RFC 9113 §4.1 - Implementations MUST ignore unknown frame types + data = <<3::24, 255::8, 0::8, 0::1, 1::31, "xyz">> + + assert {{:ok, %Frame.Unknown{type: 255}}, <<>>} = Frame.deserialize(data, 16_384) + end + end +end From 6533d44536d24d055e5a338d1b4eb6130063bac7 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Wed, 10 Dec 2025 18:55:31 -0300 Subject: [PATCH 12/47] test(grpc_core): add comprehensive HTTP/2 frame tests for gRPC scenarios Add detailed test coverage for HTTP/2 frame implementations in the frame/ directory, focusing on gRPC-specific use cases and edge cases. These tests cover gRPC-specific HTTP/2 scenarios including trailers-only responses, large message handling, connection keepalive, and flow control patterns commonly used in gRPC implementations. --- .../http2/frame/continuation_test.exs | 359 ++++++++++++++++++ .../grpc/transport/http2/frame/data_test.exs | 181 +++++++++ .../transport/http2/frame/goaway_test.exs | 347 +++++++++++++++++ .../transport/http2/frame/headers_test.exs | 236 ++++++++++++ .../grpc/transport/http2/frame/ping_test.exs | 204 ++++++++++ .../transport/http2/frame/rst_stream_test.exs | 217 +++++++++++ .../transport/http2/frame/settings_test.exs | 189 +++++++++ .../http2/frame/window_update_test.exs | 285 ++++++++++++++ 8 files changed, 2018 insertions(+) create mode 100644 grpc_core/test/grpc/transport/http2/frame/continuation_test.exs create mode 100644 grpc_core/test/grpc/transport/http2/frame/data_test.exs create mode 100644 grpc_core/test/grpc/transport/http2/frame/goaway_test.exs create mode 100644 grpc_core/test/grpc/transport/http2/frame/headers_test.exs create mode 100644 grpc_core/test/grpc/transport/http2/frame/ping_test.exs create mode 100644 grpc_core/test/grpc/transport/http2/frame/rst_stream_test.exs create mode 100644 grpc_core/test/grpc/transport/http2/frame/settings_test.exs create mode 100644 grpc_core/test/grpc/transport/http2/frame/window_update_test.exs diff --git 
a/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs b/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs new file mode 100644 index 000000000..837cd9d1e --- /dev/null +++ b/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs @@ -0,0 +1,359 @@ +defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do + use ExUnit.Case, async: true + + alias GRPC.Transport.HTTP2.Frame + alias GRPC.Transport.HTTP2.Errors + + describe "CONTINUATION frame deserialization" do + test "deserializes basic CONTINUATION frame" do + # CONTINUATION with END_HEADERS flag + data = <<3::24, 9::8, 0x4::8, 0::1, 1::31, "hdr">> + + assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: true, fragment: "hdr"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes CONTINUATION without END_HEADERS" do + # More CONTINUATION frames will follow + data = <<3::24, 9::8, 0x0::8, 0::1, 1::31, "hdr">> + + assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: false, fragment: "hdr"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes CONTINUATION with large fragment" do + fragment = String.duplicate("x", 16384) + data = <> + + assert {{:ok, %Frame.Continuation{stream_id: 1, fragment: ^fragment}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "rejects CONTINUATION with stream_id 0" do + # RFC 9113 §6.10: CONTINUATION frames MUST be associated with a stream + data = <<3::24, 9::8, 0x4::8, 0::1, 0::31, "hdr">> + + assert {{:error, error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + assert error_code == Errors.protocol_error() + end + + test "handles empty CONTINUATION frame" do + # Edge case: CONTINUATION with no payload (unusual but valid) + data = <<0::24, 9::8, 0x4::8, 0::1, 1::31>> + + assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: true, fragment: <<>>}}, <<>>} = + Frame.deserialize(data, 16_384) + end + end + + describe "CONTINUATION frame serialization" do + test "serializes 
CONTINUATION with END_HEADERS" do + frame = %Frame.Continuation{ + stream_id: 123, + end_headers: true, + fragment: "hdr" + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<3::24, 9::8, 0x4::8, 0::1, 123::31, "hdr">> = binary + end + + test "serializes CONTINUATION with large fragment" do + fragment = String.duplicate("x", 10000) + + frame = %Frame.Continuation{ + stream_id: 123, + end_headers: true, + fragment: fragment + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + <<10000::24, 9::8, 0x4::8, 0::1, 123::31, received_fragment::binary>> = binary + assert received_fragment == fragment + end + end + + describe "CONTINUATION sequence scenarios" do + test "handles HEADERS + CONTINUATION sequence" do + # Large header block split across HEADERS and CONTINUATION + full_headers = "very-long-header-block-that-exceeds-max-frame-size" + + # When serializing with small max_frame_size, it splits automatically + headers = %Frame.Headers{ + stream_id: 1, + end_headers: true, + fragment: full_headers + } + + # Serialize with small frame size to force split + frames_io = Frame.serialize(headers, 20) + + # Should produce [HEADERS, CONTINUATION] + # 3 frames for 51 bytes with 20 byte limit + assert length(frames_io) == 3 + + # Deserialize first frame (HEADERS) + [h_io, c1_io, c2_io] = frames_io + h_binary = IO.iodata_to_binary(h_io) + c1_binary = IO.iodata_to_binary(c1_io) + c2_binary = IO.iodata_to_binary(c2_io) + + {{:ok, h_frame}, <<>>} = Frame.deserialize(h_binary, 16_384) + {{:ok, c1_frame}, <<>>} = Frame.deserialize(c1_binary, 16_384) + {{:ok, c2_frame}, <<>>} = Frame.deserialize(c2_binary, 16_384) + + # Reconstruct full header block + reconstructed = + IO.iodata_to_binary([h_frame.fragment, c1_frame.fragment, c2_frame.fragment]) + + assert reconstructed == full_headers + + # First HEADERS should not have END_HEADERS + assert h_frame.end_headers == false + # Middle CONTINUATION should 
not have END_HEADERS + assert c1_frame.end_headers == false + # Last CONTINUATION should have END_HEADERS + assert c2_frame.end_headers == true + end + + test "handles HEADERS + multiple CONTINUATION frames" do + # Very large header block requiring multiple CONTINUATIONs + full_fragment = "part1part2part3" + + headers = %Frame.Headers{stream_id: 1, end_headers: true, fragment: full_fragment} + + # Serialize with small max_frame_size + frames_io = Frame.serialize(headers, 5) + + # Should split into multiple frames + assert length(frames_io) == 3 + + [h_io, c1_io, c2_io] = frames_io + + h_bin = IO.iodata_to_binary(h_io) + c1_bin = IO.iodata_to_binary(c1_io) + c2_bin = IO.iodata_to_binary(c2_io) + + {{:ok, h}, <<>>} = Frame.deserialize(h_bin, 16_384) + {{:ok, c1}, <<>>} = Frame.deserialize(c1_bin, 16_384) + {{:ok, c2}, <<>>} = Frame.deserialize(c2_bin, 16_384) + + reconstructed = IO.iodata_to_binary([h.fragment, c1.fragment, c2.fragment]) + assert reconstructed == full_fragment + assert h.end_headers == false + assert c1.end_headers == false + assert c2.end_headers == true + end + + test "verifies stream_id consistency across sequence" do + # All frames in sequence must have same stream_id + headers = %Frame.Headers{stream_id: 5, end_headers: false, fragment: "h"} + cont = %Frame.Continuation{stream_id: 5, end_headers: true, fragment: "c"} + + h_bin = IO.iodata_to_binary(Frame.serialize(headers, 16_384)) + c_bin = IO.iodata_to_binary(Frame.serialize(cont, 16_384)) + + {{:ok, h_frame}, <<>>} = Frame.deserialize(h_bin, 16_384) + {{:ok, c_frame}, <<>>} = Frame.deserialize(c_bin, 16_384) + + assert h_frame.stream_id == c_frame.stream_id + end + end + + describe "gRPC-specific scenarios" do + test "handles large gRPC metadata headers" do + # gRPC metadata can be large, requiring CONTINUATION + metadata_headers = + for i <- 1..50 do + "x-custom-header-#{i}: value-#{i}\n" + end + |> Enum.join() + + # Simulate splitting at max_frame_size + max_size = 100 + chunks = for <>, 
do: chunk + + # Add any remaining bytes + remainder_size = rem(byte_size(metadata_headers), max_size) + + chunks = + if remainder_size > 0 do + chunks ++ + [ + binary_part( + metadata_headers, + byte_size(metadata_headers) - remainder_size, + remainder_size + ) + ] + else + chunks + end + + [first | rest] = chunks + + headers = %Frame.Headers{stream_id: 1, end_headers: false, fragment: first} + + # Middle chunks in CONTINUATION without END_HEADERS + middle = Enum.slice(rest, 0..-2//-1) + + continuations = + for {chunk, _idx} <- Enum.with_index(middle) do + %Frame.Continuation{stream_id: 1, end_headers: false, fragment: chunk} + end + + last_chunk = List.last(rest) + + final_cont = %Frame.Continuation{ + stream_id: 1, + end_headers: true, + fragment: last_chunk || <<>> + } + + all_frames = [headers | continuations] ++ [final_cont] + + serialized = Enum.map(all_frames, &Frame.serialize(&1, 16_384)) + assert length(serialized) == length(all_frames) + end + + test "handles gRPC trailers with CONTINUATION" do + # Trailers can be large if they include detailed error info + trailers = """ + grpc-status: 13 + grpc-message: Internal server error + grpc-status-details-bin: #{String.duplicate("x", 500)} + x-debug-info: #{String.duplicate("y", 500)} + """ + + # Split into HEADERS + CONTINUATION + split_point = 100 + + headers = %Frame.Headers{ + stream_id: 1, + end_stream: true, + end_headers: false, + fragment: binary_part(trailers, 0, split_point) + } + + continuation = %Frame.Continuation{ + stream_id: 1, + end_headers: true, + fragment: binary_part(trailers, split_point, byte_size(trailers) - split_point) + } + + h_bin = IO.iodata_to_binary(Frame.serialize(headers, 16_384)) + c_bin = IO.iodata_to_binary(Frame.serialize(continuation, 16_384)) + + {{:ok, h}, <<>>} = Frame.deserialize(h_bin, 16_384) + {{:ok, c}, <<>>} = Frame.deserialize(c_bin, 16_384) + + reconstructed = h.fragment <> c.fragment + assert reconstructed == trailers + assert h.end_stream == true + end + + test 
"handles HPACK compressed continuation" do + # HPACK encoding can produce variable-length output + # Simulated HPACK encoded data + hpack_encoded = <<0x82, 0x86, 0x84>> <> String.duplicate(<<0x41>>, 100) + + # If exceeds frame size, split into CONTINUATION + if byte_size(hpack_encoded) > 50 do + headers = %Frame.Headers{ + stream_id: 1, + end_headers: false, + fragment: binary_part(hpack_encoded, 0, 50) + } + + continuation = %Frame.Continuation{ + stream_id: 1, + end_headers: true, + fragment: binary_part(hpack_encoded, 50, byte_size(hpack_encoded) - 50) + } + + h_result = Frame.serialize(headers, 16_384) + c_result = Frame.serialize(continuation, 16_384) + + assert is_list(h_result) + assert is_list(c_result) + end + end + + test "handles interleaved stream violation detection" do + # RFC 9113: CONTINUATION frames MUST follow HEADERS immediately + # No other frames can be sent on ANY stream until END_HEADERS + + # Correct sequence: HEADERS(stream=1, no END_HEADERS) -> CONTINUATION(stream=1) + headers = %Frame.Headers{stream_id: 1, end_headers: false, fragment: "h1"} + continuation = %Frame.Continuation{stream_id: 1, end_headers: true, fragment: "c1"} + + # Frames must be processed in order + h_bin = IO.iodata_to_binary(Frame.serialize(headers, 16_384)) + c_bin = IO.iodata_to_binary(Frame.serialize(continuation, 16_384)) + + # Deserialize in correct order + {{:ok, h}, <<>>} = Frame.deserialize(h_bin, 16_384) + {{:ok, c}, <<>>} = Frame.deserialize(c_bin, 16_384) + + assert h.stream_id == c.stream_id + assert c.end_headers == true + end + + test "handles CONTINUATION frame splitting strategy" do + # gRPC implementation should split at frame boundaries + large_metadata = String.duplicate("x-header: value\n", 1000) + max_frame = 16384 + + # Calculate number of frames needed + num_frames = div(byte_size(large_metadata), max_frame) + 1 + + # First frame is HEADERS + # Remaining are CONTINUATION + # Last frame has END_HEADERS + first_fragment = 
binary_part(large_metadata, 0, min(max_frame, byte_size(large_metadata))) + + headers = %Frame.Headers{ + stream_id: 1, + end_headers: num_frames == 1, + fragment: first_fragment + } + + assert is_struct(headers, Frame.Headers) + end + end + + describe "edge cases" do + test "handles maximum frame size CONTINUATION" do + # Test with exactly max_frame_size payload + max_payload = String.duplicate("x", 16384) + + frame = %Frame.Continuation{ + stream_id: 1, + end_headers: true, + fragment: max_payload + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert byte_size(binary) == 9 + 16384 + end + + test "handles minimum size CONTINUATION" do + frame = %Frame.Continuation{ + stream_id: 1, + end_headers: true, + fragment: <<>> + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<0::24, 9::8, 0x4::8, 0::1, 1::31>> = binary + end + end +end diff --git a/grpc_core/test/grpc/transport/http2/frame/data_test.exs b/grpc_core/test/grpc/transport/http2/frame/data_test.exs new file mode 100644 index 000000000..62a9e56d8 --- /dev/null +++ b/grpc_core/test/grpc/transport/http2/frame/data_test.exs @@ -0,0 +1,181 @@ +defmodule GRPC.Transport.HTTP2.Frame.DataTest do + use ExUnit.Case, async: true + + alias GRPC.Transport.HTTP2.Frame + alias GRPC.Transport.HTTP2.Errors + + describe "DATA frame deserialization" do + test "deserializes basic DATA frame" do + # DATA frame: stream_id=1, no padding, end_stream=false, data="hello" + data = <<5::24, 0::8, 0::8, 0::1, 1::31, "hello">> + + assert {{:ok, %Frame.Data{stream_id: 1, end_stream: false, data: "hello"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes DATA frame with END_STREAM flag" do + # Flags: END_STREAM (0x1) + data = <<5::24, 0::8, 0x1::8, 0::1, 1::31, "hello">> + + assert {{:ok, %Frame.Data{stream_id: 1, end_stream: true, data: "hello"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes DATA frame 
with padding" do + # Flags: PADDED (0x8), padding_length=3 + payload = <<3::8, "hello", 0::8, 0::8, 0::8>> + data = <> + + assert {{:ok, %Frame.Data{stream_id: 1, data: "hello"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes empty DATA frame" do + data = <<0::24, 0::8, 0::8, 0::1, 1::31>> + + assert {{:ok, %Frame.Data{stream_id: 1, data: <<>>}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "rejects DATA frame with stream_id 0" do + # RFC 9113 §6.1: DATA frames MUST be associated with a stream + data = <<3::24, 0::8, 0::8, 0::1, 0::31, "abc">> + + assert {{:error, error_code, "DATA frame with zero stream_id (RFC9113§6.1)"}, <<>>} = + Frame.deserialize(data, 16_384) + + assert error_code == Errors.protocol_error() + end + + test "handles large DATA frames" do + large_data = :binary.copy(<<1>>, 10_000) + data = <<10_000::24, 0::8, 0::8, 0::1, 1::31, large_data::binary>> + + assert {{:ok, %Frame.Data{data: ^large_data}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "rejects DATA frame with excessive padding" do + # Padding length exceeds payload + payload = <<10::8, "abc">> + data = <> + + assert {{:error, _error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + end + end + + describe "DATA frame serialization" do + test "serializes basic DATA frame" do + frame = %Frame.Data{stream_id: 123, end_stream: false, data: "hello"} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<5::24, 0::8, 0::8, 0::1, 123::31, "hello">> = binary + end + + test "serializes DATA frame with END_STREAM flag" do + frame = %Frame.Data{stream_id: 123, end_stream: true, data: "hello"} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<5::24, 0::8, 0x1::8, 0::1, 123::31, "hello">> = binary + end + + test "serializes empty DATA frame" do + frame = %Frame.Data{stream_id: 123, end_stream: false, data: <<>>} + + result = Frame.serialize(frame, 16_384) + 
binary = IO.iodata_to_binary(result) + + assert <<0::24, 0::8, 0::8, 0::1, 123::31>> = binary + end + + test "splits DATA frame exceeding max_frame_size" do + # 5 bytes of data, but max_frame_size is 2 + frame = %Frame.Data{stream_id: 123, end_stream: false, data: "hello"} + + result = Frame.serialize(frame, 2) + + assert [ + [<<2::24, 0::8, 0::8, 0::1, 123::31>>, "he"], + [<<2::24, 0::8, 0::8, 0::1, 123::31>>, "ll"], + [<<1::24, 0::8, 0::8, 0::1, 123::31>>, "o"] + ] = result + end + + test "sets END_STREAM only on last frame when splitting" do + # Should split into 3 frames, END_STREAM only on last + frame = %Frame.Data{stream_id: 123, end_stream: true, data: "hello"} + + result = Frame.serialize(frame, 2) + + # First two frames should not have END_STREAM + [[<<2::24, 0::8, 0x0::8, _::binary>>, _], [<<2::24, 0::8, 0x0::8, _::binary>>, _], _last] = + result + + # Last frame should have END_STREAM (0x1) + [[<<1::24, 0::8, 0x1::8, 0::1, 123::31>>, "o"]] = [List.last(result)] + end + + test "handles binary data (not just strings)" do + binary_data = <<0, 1, 2, 255, 254, 253>> + frame = %Frame.Data{stream_id: 123, end_stream: false, data: binary_data} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<6::24, 0::8, 0::8, 0::1, 123::31, ^binary_data::binary>> = binary + end + + test "preserves iodata structure for efficiency" do + frame = %Frame.Data{stream_id: 123, data: ["hello", " ", "world"]} + + result = Frame.serialize(frame, 16_384) + + # Result should be iodata (list) + assert is_list(result) + # But should flatten correctly + assert IO.iodata_to_binary(result) =~ "hello world" + end + end + + describe "gRPC-specific scenarios" do + test "handles gRPC message framing (5-byte length prefix)" do + # gRPC message: compressed_flag(1) + length(4) + data + grpc_msg = <<0::8, 5::32, "hello">> + frame = %Frame.Data{stream_id: 1, end_stream: false, data: grpc_msg} + + result = Frame.serialize(frame, 16_384) + binary = 
IO.iodata_to_binary(result) + + <<_header::9-bytes, payload::binary>> = binary + assert payload == grpc_msg + end + + test "handles multiple gRPC messages in single DATA frame" do + # Two gRPC messages back-to-back + msg1 = <<0::8, 5::32, "hello">> + msg2 = <<0::8, 5::32, "world">> + combined = msg1 <> msg2 + + frame = %Frame.Data{stream_id: 1, end_stream: false, data: combined} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + <<_header::9-bytes, payload::binary>> = binary + assert payload == combined + end + + test "handles compressed gRPC messages" do + # Compressed message: flag=1 + compressed_msg = <<1::8, 100::32, :zlib.compress("large data")::binary>> + frame = %Frame.Data{stream_id: 1, end_stream: false, data: compressed_msg} + + result = Frame.serialize(frame, 16_384) + assert is_list(result) + end + end +end diff --git a/grpc_core/test/grpc/transport/http2/frame/goaway_test.exs b/grpc_core/test/grpc/transport/http2/frame/goaway_test.exs new file mode 100644 index 000000000..c87d72433 --- /dev/null +++ b/grpc_core/test/grpc/transport/http2/frame/goaway_test.exs @@ -0,0 +1,347 @@ +defmodule GRPC.Transport.HTTP2.Frame.GoawayTest do + use ExUnit.Case, async: true + + alias GRPC.Transport.HTTP2.Frame + alias GRPC.Transport.HTTP2.Errors + + describe "GOAWAY frame deserialization" do + test "deserializes GOAWAY frame" do + # GOAWAY: last_stream_id=123, error=NO_ERROR, debug="" + payload = <<0::1, 123::31, 0x0::32>> + data = <> + + assert {{:ok, %Frame.Goaway{last_stream_id: 123, error_code: 0x0, debug_data: <<>>}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes GOAWAY with debug data" do + debug = "shutting down" + payload = <<0::1, 123::31, 0x0::32, debug::binary>> + data = <> + + assert {{:ok, %Frame.Goaway{last_stream_id: 123, error_code: 0x0, debug_data: ^debug}}, + <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes GOAWAY with PROTOCOL_ERROR" do + payload = <<0::1, 50::31, 
0x1::32>> + data = <> + + assert {{:ok, %Frame.Goaway{last_stream_id: 50, error_code: 0x1, debug_data: <<>>}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes GOAWAY with ENHANCE_YOUR_CALM" do + payload = <<0::1, 10::31, 0xB::32>> + data = <> + + assert {{:ok, %Frame.Goaway{last_stream_id: 10, error_code: 0xB, debug_data: <<>>}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes GOAWAY with last_stream_id 0" do + # No streams were processed + payload = <<0::1, 0::31, 0x0::32>> + data = <> + + assert {{:ok, %Frame.Goaway{last_stream_id: 0, error_code: 0x0, debug_data: <<>>}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "rejects GOAWAY with non-zero stream_id" do + # RFC 9113 §6.8: GOAWAY frames MUST be associated with stream 0 + payload = <<0::1, 123::31, 0x0::32>> + data = <> + + assert {{:error, error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + assert error_code == Errors.protocol_error() + end + + # Note: GOAWAY deserialization uses pattern matching, so insufficient length + # causes a function clause error rather than returning an error tuple + # This is by design - the frame parser validates lengths before deserialization + + test "handles GOAWAY with large debug data" do + debug = String.duplicate("debug info ", 100) + payload = <<0::1, 123::31, 0x0::32, debug::binary>> + data = <> + + assert {{:ok, %Frame.Goaway{debug_data: ^debug}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "handles reserved bit correctly" do + # Reserved bit should be ignored + payload = <<1::1, 123::31, 0x0::32>> + data = <> + + assert {{:ok, %Frame.Goaway{last_stream_id: 123}}, <<>>} = + Frame.deserialize(data, 16_384) + end + end + + describe "GOAWAY frame serialization" do + test "serializes GOAWAY with NO_ERROR" do + frame = %Frame.Goaway{ + last_stream_id: 123, + error_code: 0x0, + debug_data: <<>> + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert 
<<8::24, 7::8, 0x0::8, 0::1, 0::31, 0::1, 123::31, 0x0::32>> = binary + end + + test "serializes GOAWAY with debug data" do + debug = "shutting down" + + frame = %Frame.Goaway{ + last_stream_id: 123, + error_code: 0x0, + debug_data: debug + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + payload_length = 8 + byte_size(debug) + + <<^payload_length::24, 7::8, 0x0::8, 0::1, 0::31, 0::1, 123::31, 0x0::32, ^debug::binary>> = + binary + end + + test "serializes GOAWAY with INTERNAL_ERROR" do + frame = %Frame.Goaway{ + last_stream_id: 50, + error_code: 0x2, + debug_data: <<>> + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<8::24, 7::8, 0x0::8, 0::1, 0::31, 0::1, 50::31, 0x2::32>> = binary + end + + test "sets reserved bit to 0" do + frame = %Frame.Goaway{ + last_stream_id: 123, + error_code: 0x0, + debug_data: <<>> + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + <<_::9-bytes, reserved::1, _last_stream::31, _error::32>> = binary + assert reserved == 0 + end + end + + describe "gRPC-specific scenarios" do + test "handles graceful server shutdown" do + # Server initiates graceful shutdown with NO_ERROR + # Sets last_stream_id to highest processed stream + shutdown = %Frame.Goaway{ + last_stream_id: 99, + error_code: Errors.no_error(), + debug_data: "server shutting down gracefully" + } + + result = Frame.serialize(shutdown, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.last_stream_id == 99 + assert received.error_code == Errors.no_error() + assert received.debug_data =~ "graceful" + end + + test "handles connection timeout" do + # Connection idle timeout triggers GOAWAY + timeout = %Frame.Goaway{ + last_stream_id: 50, + error_code: Errors.no_error(), + debug_data: "idle timeout" + } + + result = Frame.serialize(timeout, 16_384) + assert is_list(result) + end + 
+ test "handles protocol violation shutdown" do + # Peer violates protocol, connection terminated + violation = %Frame.Goaway{ + last_stream_id: 25, + error_code: Errors.protocol_error(), + debug_data: "invalid frame sequence" + } + + result = Frame.serialize(violation, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.error_code == Errors.protocol_error() + end + + test "handles connection overload" do + # Server overloaded, sends GOAWAY with ENHANCE_YOUR_CALM + overload = %Frame.Goaway{ + last_stream_id: 10, + error_code: Errors.enhance_your_calm(), + debug_data: "too many requests" + } + + result = Frame.serialize(overload, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.error_code == Errors.enhance_your_calm() + end + + test "handles client-initiated close" do + # Client closes connection cleanly + close = %Frame.Goaway{ + last_stream_id: 0, + error_code: Errors.no_error(), + debug_data: <<>> + } + + result = Frame.serialize(close, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<8::24, 7::8, 0x0::8, 0::1, 0::31, 0::1, 0::31, _::32>> = binary + end + + test "handles two-phase shutdown" do + # Server sends GOAWAY with high last_stream_id + # Waits for in-flight streams to complete + # Sends final GOAWAY with actual last_stream_id + + phase1 = %Frame.Goaway{ + last_stream_id: 0x7FFFFFFF, + error_code: Errors.no_error(), + debug_data: "draining connections" + } + + phase2 = %Frame.Goaway{ + last_stream_id: 42, + error_code: Errors.no_error(), + debug_data: "final shutdown" + } + + phase1_binary = IO.iodata_to_binary(Frame.serialize(phase1, 16_384)) + phase2_binary = IO.iodata_to_binary(Frame.serialize(phase2, 16_384)) + + {{:ok, p1}, <<>>} = Frame.deserialize(phase1_binary, 16_384) + {{:ok, p2}, <<>>} = Frame.deserialize(phase2_binary, 16_384) + + assert p1.last_stream_id == 0x7FFFFFFF + assert 
p2.last_stream_id == 42 + end + + test "handles internal error during processing" do + # Unexpected internal error triggers immediate shutdown + internal = %Frame.Goaway{ + last_stream_id: 15, + error_code: Errors.internal_error(), + debug_data: "unexpected exception in handler" + } + + result = Frame.serialize(internal, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.error_code == Errors.internal_error() + end + + test "handles flow control error shutdown" do + # Global flow control violation requires connection close + flow_error = %Frame.Goaway{ + last_stream_id: 20, + error_code: Errors.flow_control_error(), + debug_data: "connection window exceeded" + } + + result = Frame.serialize(flow_error, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.error_code == Errors.flow_control_error() + end + + test "includes diagnostic info in debug data" do + # Useful debug information for troubleshooting + diagnostic = %Frame.Goaway{ + last_stream_id: 30, + error_code: Errors.protocol_error(), + debug_data: "frame_type=1 stream_id=31 error=invalid_headers" + } + + result = Frame.serialize(diagnostic, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.debug_data =~ "frame_type" + assert received.debug_data =~ "invalid_headers" + end + + test "handles connection state after GOAWAY" do + # After sending GOAWAY, no new streams should be created + # Existing streams can complete + + goaway = %Frame.Goaway{ + last_stream_id: 10, + error_code: Errors.no_error(), + debug_data: "no new streams" + } + + result = Frame.serialize(goaway, 16_384) + binary = IO.iodata_to_binary(result) + + # Any stream_id <= 10 can still send frames + # Any stream_id > 10 should be rejected + {{:ok, %{last_stream_id: last_stream}}, <<>>} = Frame.deserialize(binary, 
16_384) + assert last_stream == 10 + end + end + + describe "error code mapping" do + test "maps all standard HTTP/2 error codes" do + error_scenarios = [ + {Errors.no_error(), "clean shutdown"}, + {Errors.protocol_error(), "protocol violation"}, + {Errors.internal_error(), "internal server error"}, + {Errors.flow_control_error(), "flow control violation"}, + {Errors.settings_timeout(), "settings timeout"}, + {Errors.stream_closed(), "frame on closed stream"}, + {Errors.frame_size_error(), "invalid frame size"}, + {Errors.refused_stream(), "stream refused"}, + {Errors.cancel(), "operation cancelled"}, + {Errors.compression_error(), "compression error"}, + {Errors.connect_error(), "connect error"}, + {Errors.enhance_your_calm(), "excessive load"}, + {Errors.inadequate_security(), "security requirements not met"}, + {Errors.http_1_1_requires(), "HTTP/1.1 required"} + ] + + for {error_code, description} <- error_scenarios do + frame = %Frame.Goaway{ + last_stream_id: 1, + error_code: error_code, + debug_data: description + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.error_code == error_code + assert received.debug_data == description + end + end + end +end diff --git a/grpc_core/test/grpc/transport/http2/frame/headers_test.exs b/grpc_core/test/grpc/transport/http2/frame/headers_test.exs new file mode 100644 index 000000000..07d2f8b50 --- /dev/null +++ b/grpc_core/test/grpc/transport/http2/frame/headers_test.exs @@ -0,0 +1,236 @@ +defmodule GRPC.Transport.HTTP2.Frame.HeadersTest do + use ExUnit.Case, async: true + + alias GRPC.Transport.HTTP2.Frame + alias GRPC.Transport.HTTP2.Errors + + describe "HEADERS frame deserialization" do + test "deserializes basic HEADERS frame" do + # HEADERS frame with END_HEADERS flag + data = <<3::24, 1::8, 0x4::8, 0::1, 1::31, "hdr">> + + assert {{:ok, %Frame.Headers{stream_id: 1, end_headers: true, fragment: 
"hdr"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes HEADERS frame with END_STREAM flag" do + # Flags: END_STREAM (0x1) + END_HEADERS (0x4) = 0x5 + data = <<3::24, 1::8, 0x5::8, 0::1, 1::31, "hdr">> + + assert {{:ok, + %Frame.Headers{stream_id: 1, end_stream: true, end_headers: true, fragment: "hdr"}}, + <<>>} = Frame.deserialize(data, 16_384) + end + + test "deserializes HEADERS frame without END_HEADERS (needs CONTINUATION)" do + # No END_HEADERS flag - requires CONTINUATION + data = <<3::24, 1::8, 0x0::8, 0::1, 1::31, "hdr">> + + assert {{:ok, %Frame.Headers{stream_id: 1, end_headers: false, fragment: "hdr"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes HEADERS frame with PRIORITY flag" do + # Flags: PRIORITY (0x20) + END_HEADERS (0x4) = 0x24 + # Priority: exclusive=1, dependency=5, weight=10 + priority = <<1::1, 5::31, 10::8>> + + data = + < "hdr")::24, 1::8, 0x24::8, 0::1, 1::31, priority::binary, "hdr">> + + assert {{:ok, + %Frame.Headers{ + stream_id: 1, + exclusive_dependency: true, + stream_dependency: 5, + weight: 10, + fragment: "hdr" + }}, <<>>} = Frame.deserialize(data, 16_384) + end + + test "deserializes HEADERS frame with padding" do + # Flags: PADDED (0x8) + END_HEADERS (0x4) = 0xC + payload = <<2::8, "hdr", 0::8, 0::8>> + data = <> + + assert {{:ok, %Frame.Headers{stream_id: 1, fragment: "hdr"}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "rejects HEADERS frame with stream_id 0" do + # RFC 9113 §6.2: HEADERS frames MUST be associated with a stream + data = <<3::24, 1::8, 0x4::8, 0::1, 0::31, "hdr">> + + assert {{:error, error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + assert error_code == Errors.protocol_error() + end + + test "handles HPACK compressed headers" do + # Simulated HPACK encoded headers (not real HPACK encoding) + hpack_data = <<0x82, 0x86, 0x84, 0x41, 0x0F>> + data = <> + + assert {{:ok, %Frame.Headers{fragment: ^hpack_data}}, <<>>} = + 
Frame.deserialize(data, 16_384) + end + end + + describe "HEADERS frame serialization" do + test "serializes basic HEADERS frame with END_HEADERS" do + frame = %Frame.Headers{ + stream_id: 123, + end_stream: false, + end_headers: true, + fragment: "hdr" + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + # Flags: END_HEADERS (0x4) + assert <<3::24, 1::8, 0x4::8, 0::1, 123::31, "hdr">> = binary + end + + test "serializes HEADERS frame with END_STREAM and END_HEADERS" do + frame = %Frame.Headers{ + stream_id: 123, + end_stream: true, + end_headers: true, + fragment: "hdr" + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + # Flags: END_STREAM (0x1) + END_HEADERS (0x4) = 0x5 + assert <<3::24, 1::8, 0x5::8, 0::1, 123::31, "hdr">> = binary + end + + test "splits large HEADERS into HEADERS + CONTINUATION frames" do + # 6 bytes of headers, max_frame_size = 2 + frame = %Frame.Headers{ + stream_id: 123, + end_stream: false, + end_headers: true, + fragment: "header" + } + + result = Frame.serialize(frame, 2) + + # Should produce: HEADERS (no END_HEADERS) + CONTINUATION + CONTINUATION (END_HEADERS) + assert [ + [<<2::24, 1::8, 0x0::8, _::binary>>, "he"], + [<<2::24, 9::8, 0x0::8, _::binary>>, "ad"], + [<<2::24, 9::8, 0x4::8, _::binary>>, "er"] + ] = result + end + + test "CONTINUATION frames have END_HEADERS only on last frame" do + frame = %Frame.Headers{ + stream_id: 123, + end_headers: true, + fragment: "abcde" + } + + result = Frame.serialize(frame, 2) + + # First HEADERS: no END_HEADERS + [[<<2::24, 1::8, flags1::8, _::binary>>, _] | continuation_frames] = result + assert flags1 == 0x0 + + # Middle CONTINUATION: no END_HEADERS + middle_frames = Enum.slice(continuation_frames, 0..-2//-1) + + for [<<_::24, 9::8, flags::8, _::binary>>, _] <- middle_frames do + assert flags == 0x0 + end + + # Last CONTINUATION: END_HEADERS (0x4) + [[<<_::24, 9::8, last_flags::8, _::binary>>, _]] = + 
Enum.slice(continuation_frames, -1..-1//1) + + assert last_flags == 0x4 + end + + test "preserves END_STREAM flag when splitting" do + frame = %Frame.Headers{ + stream_id: 123, + end_stream: true, + end_headers: true, + fragment: "abcde" + } + + result = Frame.serialize(frame, 2) + + # First HEADERS should have END_STREAM (0x1) but not END_HEADERS + [[<<2::24, 1::8, 0x1::8, _::binary>>, _] | _] = result + end + end + + describe "gRPC-specific scenarios" do + test "handles gRPC pseudo-headers" do + # gRPC uses HTTP/2 pseudo-headers: :method, :scheme, :path, :authority + # These would be HPACK encoded, but we test with raw bytes + grpc_headers = ":method: POST\n:path: /my.Service/Method" + frame = %Frame.Headers{stream_id: 1, end_headers: true, fragment: grpc_headers} + + result = Frame.serialize(frame, 16_384) + assert is_list(result) + end + + test "handles gRPC metadata headers" do + # gRPC metadata: custom headers, timeout, compression + metadata = "grpc-timeout: 1S\ngrpc-encoding: gzip\nx-custom: value" + frame = %Frame.Headers{stream_id: 1, end_headers: true, fragment: metadata} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + <<_header::9-bytes, payload::binary>> = binary + assert payload == metadata + end + + test "handles trailers-only response (no DATA frames)" do + # gRPC can send trailers-only response for immediate errors + # HEADERS frame with both END_STREAM and END_HEADERS + trailers = "grpc-status: 0\ngrpc-message: OK" + + frame = %Frame.Headers{ + stream_id: 1, + end_stream: true, + end_headers: true, + fragment: trailers + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + # Should have both END_STREAM (0x1) and END_HEADERS (0x4) = 0x5 + <<_length::24, 1::8, 0x5::8, _::binary>> = binary + end + + test "handles large metadata requiring continuation" do + # Large custom metadata that exceeds max_frame_size + large_metadata = String.duplicate("x-custom-#{:rand.uniform(1000)}: 
value\n", 100) + + frame = %Frame.Headers{ + stream_id: 1, + end_headers: true, + fragment: large_metadata + } + + result = Frame.serialize(frame, 100) + + # Should split into multiple frames + assert length(result) > 1 + + # First should be HEADERS + [[<<_::24, 1::8, _::8, _::binary>>, _] | continuation] = result + + # Rest should be CONTINUATION (type 9) + for [<<_::24, 9::8, _::8, _::binary>>, _] <- continuation do + assert true + end + end + end +end diff --git a/grpc_core/test/grpc/transport/http2/frame/ping_test.exs b/grpc_core/test/grpc/transport/http2/frame/ping_test.exs new file mode 100644 index 000000000..5c3acc462 --- /dev/null +++ b/grpc_core/test/grpc/transport/http2/frame/ping_test.exs @@ -0,0 +1,204 @@ +defmodule GRPC.Transport.HTTP2.Frame.PingTest do + use ExUnit.Case, async: true + + alias GRPC.Transport.HTTP2.Frame + alias GRPC.Transport.HTTP2.Errors + + describe "PING frame deserialization" do + test "deserializes PING frame" do + # PING with 8 bytes of opaque data + payload = <<1, 2, 3, 4, 5, 6, 7, 8>> + data = <<8::24, 6::8, 0x0::8, 0::1, 0::31, payload::binary>> + + assert {{:ok, %Frame.Ping{ack: false, payload: ^payload}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes PING ACK frame" do + # PING with ACK flag (0x1) + payload = <<1, 2, 3, 4, 5, 6, 7, 8>> + data = <<8::24, 6::8, 0x1::8, 0::1, 0::31, payload::binary>> + + assert {{:ok, %Frame.Ping{ack: true, payload: ^payload}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "rejects PING frame with non-zero stream_id" do + # RFC 9113 §6.7: PING frames MUST be associated with stream 0 + payload = <<1, 2, 3, 4, 5, 6, 7, 8>> + data = <<8::24, 6::8, 0x0::8, 0::1, 1::31, payload::binary>> + + assert {{:error, error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + assert error_code == Errors.protocol_error() + end + + test "rejects PING frame with incorrect length" do + # RFC 9113 §6.7: PING frames MUST be exactly 8 bytes + payload = <<1, 2, 3, 4>> + data 
= <<4::24, 6::8, 0x0::8, 0::1, 0::31, payload::binary>> + + assert {{:error, error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + assert error_code == Errors.frame_size_error() + end + + test "rejects PING frame with length too large" do + # Must be exactly 8 bytes, not more + payload = <<1, 2, 3, 4, 5, 6, 7, 8, 9, 10>> + data = <<10::24, 6::8, 0x0::8, 0::1, 0::31, payload::binary>> + + assert {{:error, error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + assert error_code == Errors.frame_size_error() + end + + test "handles PING with all zeros" do + payload = <<0, 0, 0, 0, 0, 0, 0, 0>> + data = <<8::24, 6::8, 0x0::8, 0::1, 0::31, payload::binary>> + + assert {{:ok, %Frame.Ping{ack: false, payload: ^payload}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "handles PING with all ones" do + payload = <<255, 255, 255, 255, 255, 255, 255, 255>> + data = <<8::24, 6::8, 0x0::8, 0::1, 0::31, payload::binary>> + + assert {{:ok, %Frame.Ping{ack: false, payload: ^payload}}, <<>>} = + Frame.deserialize(data, 16_384) + end + end + + describe "PING frame serialization" do + test "serializes PING frame" do + payload = <<1, 2, 3, 4, 5, 6, 7, 8>> + frame = %Frame.Ping{ack: false, payload: payload} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<8::24, 6::8, 0x0::8, 0::1, 0::31, ^payload::binary>> = binary + end + + test "serializes PING ACK frame" do + payload = <<1, 2, 3, 4, 5, 6, 7, 8>> + frame = %Frame.Ping{ack: true, payload: payload} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<8::24, 6::8, 0x1::8, 0::1, 0::31, ^payload::binary>> = binary + end + + test "preserves payload exactly" do + # Ensure payload is not modified + payload = <<0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xBA, 0xBE>> + frame = %Frame.Ping{ack: false, payload: payload} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + <<_::9-bytes, 
received_payload::8-bytes>> = binary + assert received_payload == payload + end + end + + describe "PING round-trip" do + test "PING request and ACK response match payload" do + # Client sends PING + client_payload = <<1, 2, 3, 4, 5, 6, 7, 8>> + ping = %Frame.Ping{ack: false, payload: client_payload} + + # Server responds with PING ACK containing same payload + pong = %Frame.Ping{ack: true, payload: client_payload} + + ping_binary = IO.iodata_to_binary(Frame.serialize(ping, 16_384)) + pong_binary = IO.iodata_to_binary(Frame.serialize(pong, 16_384)) + + # Deserialize both + {{:ok, ping_frame}, <<>>} = Frame.deserialize(ping_binary, 16_384) + {{:ok, pong_frame}, <<>>} = Frame.deserialize(pong_binary, 16_384) + + # Verify payload matches + assert IO.iodata_to_binary(ping_frame.payload) == IO.iodata_to_binary(pong_frame.payload) + assert ping_frame.ack == false + assert pong_frame.ack == true + end + end + + describe "gRPC-specific scenarios" do + test "handles keepalive PING" do + # gRPC uses PING for connection keepalive + # Typically uses timestamp or counter as payload + timestamp = System.system_time(:millisecond) + payload = <> + + ping = %Frame.Ping{ack: false, payload: payload} + pong = %Frame.Ping{ack: true, payload: payload} + + ping_frame = Frame.serialize(ping, 16_384) + pong_frame = Frame.serialize(pong, 16_384) + + assert is_list(ping_frame) + assert is_list(pong_frame) + end + + test "handles latency measurement" do + # Can use PING to measure RTT + # Use a positive timestamp value + timestamp = System.system_time(:millisecond) + payload = <> + + # Send PING + ping = %Frame.Ping{ack: false, payload: payload} + _ping_binary = IO.iodata_to_binary(Frame.serialize(ping, 16_384)) + + # Receive PING ACK + {{:ok, pong}, <<>>} = + Frame.deserialize(<<8::24, 6::8, 0x1::8, 0::1, 0::31, payload::binary>>, 16_384) + + # Calculate RTT (in real scenario, would have network delay) + <> = IO.iodata_to_binary(pong.payload) + assert received_time == timestamp + end + + 
test "handles connection health check" do + # gRPC clients periodically send PING to check connection + health_check = %Frame.Ping{ + ack: false, + payload: <<"HEALTH", 0::16>> + } + + result = Frame.serialize(health_check, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert IO.iodata_to_binary(received.payload) == <<"HEALTH", 0::16>> + end + + test "handles PING flood protection scenario" do + # In production, need to rate-limit PING frames + # Test multiple PINGs with different payload + pings = + for i <- 1..10 do + %Frame.Ping{ack: false, payload: <>} + end + + serialized = Enum.map(pings, &Frame.serialize(&1, 16_384)) + + assert length(serialized) == 10 + assert Enum.all?(serialized, &is_list/1) + end + + test "handles PING timeout scenario" do + # Send PING, simulate no response (timeout detection) + ping = %Frame.Ping{ + ack: false, + payload: <> + } + + ping_binary = IO.iodata_to_binary(Frame.serialize(ping, 16_384)) + + # In real implementation, would start timer and close connection if no ACK + assert byte_size(ping_binary) == 17 + end + end +end diff --git a/grpc_core/test/grpc/transport/http2/frame/rst_stream_test.exs b/grpc_core/test/grpc/transport/http2/frame/rst_stream_test.exs new file mode 100644 index 000000000..cf40580bb --- /dev/null +++ b/grpc_core/test/grpc/transport/http2/frame/rst_stream_test.exs @@ -0,0 +1,217 @@ +defmodule GRPC.Transport.HTTP2.Frame.RstStreamTest do + use ExUnit.Case, async: true + + alias GRPC.Transport.HTTP2.Frame + alias GRPC.Transport.HTTP2.Errors + + describe "RST_STREAM frame deserialization" do + test "deserializes RST_STREAM with NO_ERROR" do + # RST_STREAM with error code NO_ERROR (0x0) + data = <<4::24, 3::8, 0x0::8, 0::1, 123::31, 0x0::32>> + + assert {{:ok, %Frame.RstStream{stream_id: 123, error_code: 0x0}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes RST_STREAM with PROTOCOL_ERROR" do + data = <<4::24, 3::8, 0x0::8, 
0::1, 123::31, 0x1::32>> + + assert {{:ok, %Frame.RstStream{stream_id: 123, error_code: 0x1}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes RST_STREAM with INTERNAL_ERROR" do + data = <<4::24, 3::8, 0x0::8, 0::1, 123::31, 0x2::32>> + + assert {{:ok, %Frame.RstStream{stream_id: 123, error_code: 0x2}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes RST_STREAM with FLOW_CONTROL_ERROR" do + data = <<4::24, 3::8, 0x0::8, 0::1, 123::31, 0x3::32>> + + assert {{:ok, %Frame.RstStream{stream_id: 123, error_code: 0x3}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes RST_STREAM with CANCEL" do + data = <<4::24, 3::8, 0x0::8, 0::1, 123::31, 0x8::32>> + + assert {{:ok, %Frame.RstStream{stream_id: 123, error_code: 0x8}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "rejects RST_STREAM with stream_id 0" do + # RFC 9113 §6.4: RST_STREAM frames MUST be associated with a stream + data = <<4::24, 3::8, 0x0::8, 0::1, 0::31, 0x8::32>> + + assert {{:error, error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + assert error_code == Errors.protocol_error() + end + + test "rejects RST_STREAM with incorrect length" do + # RFC 9113 §6.4: RST_STREAM frames MUST be 4 bytes + data = <<2::24, 3::8, 0x0::8, 0::1, 123::31, 0x8::16>> + + assert {{:error, error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + assert error_code == Errors.frame_size_error() + end + + test "handles RST_STREAM with unknown error code" do + # Unknown error codes should still be accepted + data = <<4::24, 3::8, 0x0::8, 0::1, 123::31, 0xFF::32>> + + assert {{:ok, %Frame.RstStream{stream_id: 123, error_code: 0xFF}}, <<>>} = + Frame.deserialize(data, 16_384) + end + end + + describe "RST_STREAM frame serialization" do + test "serializes RST_STREAM with NO_ERROR" do + frame = %Frame.RstStream{stream_id: 123, error_code: 0x0} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert 
<<4::24, 3::8, 0x0::8, 0::1, 123::31, 0x0::32>> = binary + end + + test "serializes RST_STREAM with CANCEL" do + frame = %Frame.RstStream{stream_id: 123, error_code: 0x8} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<4::24, 3::8, 0x0::8, 0::1, 123::31, 0x8::32>> = binary + end + + test "serializes RST_STREAM with INTERNAL_ERROR" do + frame = %Frame.RstStream{stream_id: 456, error_code: 0x2} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<4::24, 3::8, 0x0::8, 0::1, 456::31, 0x2::32>> = binary + end + end + + describe "gRPC-specific scenarios" do + test "handles client cancellation" do + # Client cancels RPC by sending RST_STREAM with CANCEL (0x8) + cancel = %Frame.RstStream{stream_id: 1, error_code: Errors.cancel()} + + result = Frame.serialize(cancel, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.error_code == Errors.cancel() + end + + test "handles server rejecting stream" do + # Server rejects stream due to overload with REFUSED_STREAM (0x7) + reject = %Frame.RstStream{stream_id: 1, error_code: Errors.refused_stream()} + + result = Frame.serialize(reject, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.error_code == Errors.refused_stream() + end + + test "handles flow control violation" do + # RST_STREAM sent when peer violates flow control + flow_error = %Frame.RstStream{ + stream_id: 5, + error_code: Errors.flow_control_error() + } + + result = Frame.serialize(flow_error, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<4::24, 3::8, 0x0::8, 0::1, 5::31, _error::32>> = binary + end + + test "handles stream timeout" do + # Application-level timeout can trigger RST_STREAM with CANCEL + timeout = %Frame.RstStream{ + stream_id: 10, + error_code: Errors.cancel() + } + + result = 
Frame.serialize(timeout, 16_384) + assert is_list(result) + end + + test "handles protocol violation on stream" do + # Protocol error specific to a stream + protocol_err = %Frame.RstStream{ + stream_id: 15, + error_code: Errors.protocol_error() + } + + result = Frame.serialize(protocol_err, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.stream_id == 15 + assert received.error_code == Errors.protocol_error() + end + + test "handles concurrent stream resets" do + # Multiple streams can be reset independently + resets = + for stream_id <- 1..10 do + %Frame.RstStream{stream_id: stream_id, error_code: Errors.cancel()} + end + + serialized = Enum.map(resets, &Frame.serialize(&1, 16_384)) + + assert length(serialized) == 10 + assert Enum.all?(serialized, &is_list/1) + end + + test "handles RST_STREAM after partial message" do + # Stream reset while message is being transmitted + rst = %Frame.RstStream{ + stream_id: 3, + error_code: Errors.internal_error() + } + + result = Frame.serialize(rst, 16_384) + binary = IO.iodata_to_binary(result) + + # Verify frame structure + assert <<4::24, 3::8, 0x0::8, 0::1, 3::31, _::32>> = binary + end + + test "handles RST_STREAM for idle stream" do + # Receiving RST_STREAM for stream that was never opened + # Implementation should handle gracefully + rst = %Frame.RstStream{stream_id: 999, error_code: 0x1} + + result = Frame.serialize(rst, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.stream_id == 999 + end + + test "error code mapping to gRPC status" do + # Different HTTP/2 errors map to different gRPC status codes + error_codes = [ + {Errors.no_error(), "NO_ERROR - clean shutdown"}, + {Errors.protocol_error(), "PROTOCOL_ERROR - invalid protocol state"}, + {Errors.internal_error(), "INTERNAL_ERROR - server error"}, + {Errors.flow_control_error(), "FLOW_CONTROL_ERROR - window 
violated"}, + {Errors.cancel(), "CANCEL - client cancellation"}, + {Errors.refused_stream(), "REFUSED_STREAM - server overload"} + ] + + for {error_code, _description} <- error_codes do + frame = %Frame.RstStream{stream_id: 1, error_code: error_code} + result = Frame.serialize(frame, 16_384) + assert is_list(result) + end + end + end +end diff --git a/grpc_core/test/grpc/transport/http2/frame/settings_test.exs b/grpc_core/test/grpc/transport/http2/frame/settings_test.exs new file mode 100644 index 000000000..4558ca3a1 --- /dev/null +++ b/grpc_core/test/grpc/transport/http2/frame/settings_test.exs @@ -0,0 +1,189 @@ +defmodule GRPC.Transport.HTTP2.Frame.SettingsTest do + use ExUnit.Case, async: true + + alias GRPC.Transport.HTTP2.Frame + + describe "SETTINGS frame deserialization" do + test "deserializes empty SETTINGS frame" do + # Empty SETTINGS frame (connection preface) + data = <<0::24, 4::8, 0x0::8, 0::1, 0::31>> + + assert {{:ok, %Frame.Settings{ack: false, settings: settings}}, <<>>} = + Frame.deserialize(data, 16_384) + + assert settings == %{} + end + + test "deserializes SETTINGS ACK frame" do + # SETTINGS frame with ACK flag (0x1) + data = <<0::24, 4::8, 0x1::8, 0::1, 0::31>> + + assert {{:ok, %Frame.Settings{ack: true, settings: nil}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes SETTINGS frame with max_concurrent_streams" do + # SETTINGS_MAX_CONCURRENT_STREAMS (0x3) = 100 + settings_payload = <<0x3::16, 100::32>> + + data = + <> + + assert {{:ok, %Frame.Settings{settings: %{max_concurrent_streams: 100}}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes SETTINGS frame with multiple settings" do + settings_payload = << + 0x1::16, + 8192::32, + # SETTINGS_HEADER_TABLE_SIZE + 0x3::16, + 100::32, + # SETTINGS_MAX_CONCURRENT_STREAMS + 0x4::16, + 32768::32 + # SETTINGS_INITIAL_WINDOW_SIZE + >> + + data = + <> + + assert {{:ok, + %Frame.Settings{ + settings: %{ + header_table_size: 8192, + max_concurrent_streams: 
100, + initial_window_size: 32768 + } + }}, <<>>} = Frame.deserialize(data, 16_384) + end + end + + describe "SETTINGS frame serialization" do + test "serializes empty SETTINGS frame" do + frame = %Frame.Settings{ack: false, settings: %{}} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<0::24, 4::8, 0x0::8, 0::1, 0::31>> = binary + end + + test "serializes SETTINGS ACK frame" do + frame = %Frame.Settings{ack: true, settings: nil} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<0::24, 4::8, 0x1::8, 0::1, 0::31>> = binary + end + + test "serializes SETTINGS frame with max_concurrent_streams" do + frame = %Frame.Settings{ack: false, settings: %{max_concurrent_streams: 100}} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<6::24, 4::8, 0x0::8, 0::1, 0::31, 0x3::16, 100::32>> = binary + end + + test "serializes SETTINGS frame omitting default values" do + # Default values should be omitted + frame = %Frame.Settings{ + ack: false, + settings: %{ + header_table_size: 4096, + # default + max_concurrent_streams: 200, + # non-default + initial_window_size: 65535, + # default + max_frame_size: 16384 + # default + } + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + # Should only include max_concurrent_streams (non-default) + assert <<6::24, 4::8, 0x0::8, 0::1, 0::31, 0x3::16, 200::32>> = binary + end + end + + describe "gRPC-specific scenarios" do + test "handles gRPC recommended settings" do + # gRPC typically uses specific settings + frame = %Frame.Settings{ + settings: %{ + header_table_size: 8192, + max_concurrent_streams: 100, + initial_window_size: 1_048_576, + max_frame_size: 16384 + } + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + # Verify it's a valid SETTINGS frame (header_table_size, max_concurrent_streams, initial_window_size) + # 
max_frame_size with default value is omitted + <> = binary + assert length == 18 + end + + test "handles connection preface SETTINGS" do + # Client/server exchange SETTINGS during connection preface + settings = %Frame.Settings{ + settings: %{ + max_concurrent_streams: 100, + initial_window_size: 65535 + } + } + + result = Frame.serialize(settings, 16_384) + assert is_list(result) + end + + test "handles SETTINGS ACK response" do + # After receiving SETTINGS, peer must send SETTINGS ACK + ack = %Frame.Settings{ack: true, settings: nil} + + result = Frame.serialize(ack, 16_384) + binary = IO.iodata_to_binary(result) + + # Empty payload with ACK flag + assert <<0::24, 4::8, 0x1::8, 0::1, 0::31>> = binary + end + + test "handles window size updates via SETTINGS" do + # Changing SETTINGS_INITIAL_WINDOW_SIZE affects flow control + frame = %Frame.Settings{ + settings: %{ + initial_window_size: 1_048_576 + } + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + <<6::24, 4::8, 0x0::8, 0::1, 0::31, 0x4::16, 1_048_576::32>> = binary + end + + test "handles unlimited max_concurrent_streams" do + # :infinity means no limit on concurrent streams + frame = %Frame.Settings{ + settings: %{ + max_concurrent_streams: :infinity + } + } + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + # :infinity is omitted (no setting sent) + assert <<0::24, 4::8, 0x0::8, 0::1, 0::31>> = binary + end + end +end diff --git a/grpc_core/test/grpc/transport/http2/frame/window_update_test.exs b/grpc_core/test/grpc/transport/http2/frame/window_update_test.exs new file mode 100644 index 000000000..0e145b0f7 --- /dev/null +++ b/grpc_core/test/grpc/transport/http2/frame/window_update_test.exs @@ -0,0 +1,285 @@ +defmodule GRPC.Transport.HTTP2.Frame.WindowUpdateTest do + use ExUnit.Case, async: true + import Bitwise + + alias GRPC.Transport.HTTP2.Frame + alias GRPC.Transport.HTTP2.Errors + + describe "WINDOW_UPDATE frame deserialization" 
do + test "deserializes WINDOW_UPDATE for stream" do + # WINDOW_UPDATE for stream 123, increment 1000 + data = <<4::24, 8::8, 0x0::8, 0::1, 123::31, 0::1, 1000::31>> + + assert {{:ok, %Frame.WindowUpdate{stream_id: 123, size_increment: 1000}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes WINDOW_UPDATE for connection (stream 0)" do + # WINDOW_UPDATE for connection-level flow control + data = <<4::24, 8::8, 0x0::8, 0::1, 0::31, 0::1, 65535::31>> + + assert {{:ok, %Frame.WindowUpdate{stream_id: 0, size_increment: 65535}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "deserializes WINDOW_UPDATE with maximum increment" do + # Maximum increment: 2^31 - 1 + max_increment = (1 <<< 31) - 1 + data = <<4::24, 8::8, 0x0::8, 0::1, 1::31, 0::1, max_increment::31>> + + assert {{:ok, %Frame.WindowUpdate{stream_id: 1, size_increment: ^max_increment}}, <<>>} = + Frame.deserialize(data, 16_384) + end + + test "rejects WINDOW_UPDATE with zero increment" do + # RFC 9113 §6.9: A receiver MUST treat increment of 0 as error + data = <<4::24, 8::8, 0x0::8, 0::1, 123::31, 0::1, 0::31>> + + assert {{:error, error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + assert error_code == Errors.frame_size_error() + end + + test "rejects WINDOW_UPDATE with incorrect length" do + # RFC 9113 §6.9: WINDOW_UPDATE frames MUST be 4 bytes + data = <<2::24, 8::8, 0x0::8, 0::1, 123::31, 100::16>> + + assert {{:error, error_code, _reason}, <<>>} = Frame.deserialize(data, 16_384) + assert error_code == Errors.frame_size_error() + end + + test "handles reserved bit correctly" do + # Reserved bit (first bit) should be ignored + data = <<4::24, 8::8, 0x0::8, 0::1, 123::31, 1::1, 1000::31>> + + assert {{:ok, %Frame.WindowUpdate{stream_id: 123, size_increment: 1000}}, <<>>} = + Frame.deserialize(data, 16_384) + end + end + + describe "WINDOW_UPDATE frame serialization" do + test "serializes WINDOW_UPDATE for stream" do + frame = %Frame.WindowUpdate{stream_id: 123, 
size_increment: 1000} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<4::24, 8::8, 0x0::8, 0::1, 123::31, 0::1, 1000::31>> = binary + end + + test "serializes WINDOW_UPDATE for connection" do + frame = %Frame.WindowUpdate{stream_id: 0, size_increment: 65535} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<4::24, 8::8, 0x0::8, 0::1, 0::31, 0::1, 65535::31>> = binary + end + + test "serializes WINDOW_UPDATE with maximum increment" do + max_increment = (1 <<< 31) - 1 + frame = %Frame.WindowUpdate{stream_id: 1, size_increment: max_increment} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<4::24, 8::8, 0x0::8, 0::1, 1::31, 0::1, ^max_increment::31>> = binary + end + + test "sets reserved bit to 0" do + frame = %Frame.WindowUpdate{stream_id: 123, size_increment: 1000} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + # Extract window size increment field and check reserved bit + <<_::9-bytes, reserved::1, increment::31>> = binary + assert reserved == 0 + assert increment == 1000 + end + end + + describe "gRPC-specific scenarios" do + test "handles stream-level window update after consuming data" do + # After processing DATA frame, update stream window + consumed = 8192 + window_update = %Frame.WindowUpdate{stream_id: 1, size_increment: consumed} + + result = Frame.serialize(window_update, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.size_increment == consumed + end + + test "handles connection-level window update" do + # Update connection-level window after processing multiple streams + connection_update = %Frame.WindowUpdate{ + stream_id: 0, + size_increment: 1_048_576 + } + + result = Frame.serialize(connection_update, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = 
Frame.deserialize(binary, 16_384) + assert received.stream_id == 0 + assert received.size_increment == 1_048_576 + end + + test "handles window exhaustion prevention" do + # Send window update before window is fully exhausted + # Keep at least 50% window available + initial_window = 65535 + consumed = div(initial_window, 2) + + update = %Frame.WindowUpdate{stream_id: 1, size_increment: consumed} + + result = Frame.serialize(update, 16_384) + assert is_list(result) + end + + test "handles large message flow control" do + # For large gRPC messages, need multiple window updates + chunk_size = 16384 + + updates = + for _i <- 1..10 do + %Frame.WindowUpdate{stream_id: 1, size_increment: chunk_size} + end + + serialized = Enum.map(updates, &Frame.serialize(&1, 16_384)) + + assert length(serialized) == 10 + assert Enum.all?(serialized, &is_list/1) + end + + test "handles window update for streaming RPCs" do + # Bidirectional streaming needs careful window management + # Client updates window as it consumes server data + client_update = %Frame.WindowUpdate{stream_id: 1, size_increment: 32768} + + # Server updates window as it consumes client data + server_update = %Frame.WindowUpdate{stream_id: 1, size_increment: 32768} + + client_binary = IO.iodata_to_binary(Frame.serialize(client_update, 16_384)) + server_binary = IO.iodata_to_binary(Frame.serialize(server_update, 16_384)) + + assert client_binary == server_binary + end + + test "handles window overflow detection" do + # Receiving window update that would overflow window size + # Current window: 65535, increment: max_value would exceed 2^31-1 + # Implementation should detect this as flow control error + + # Send legitimate max increment + max_increment = (1 <<< 31) - 1 + frame = %Frame.WindowUpdate{stream_id: 1, size_increment: max_increment} + + result = Frame.serialize(frame, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.size_increment == 
max_increment + end + + test "handles immediate window update strategy" do + # Eagerly update window after every DATA frame + data_frame_size = 8192 + + update = %Frame.WindowUpdate{ + stream_id: 1, + size_increment: data_frame_size + } + + result = Frame.serialize(update, 16_384) + assert is_list(result) + end + + test "handles batched window updates" do + # Buffer multiple DATA frames, then send one window update + total_consumed = 8192 + 8192 + 4096 + + update = %Frame.WindowUpdate{ + stream_id: 1, + size_increment: total_consumed + } + + result = Frame.serialize(update, 16_384) + binary = IO.iodata_to_binary(result) + + {{:ok, received}, <<>>} = Frame.deserialize(binary, 16_384) + assert received.size_increment == total_consumed + end + + test "handles connection vs stream window priority" do + # Both connection and stream windows must be available + # Update both when consuming data + + connection_update = %Frame.WindowUpdate{stream_id: 0, size_increment: 16384} + stream_update = %Frame.WindowUpdate{stream_id: 1, size_increment: 16384} + + conn_binary = IO.iodata_to_binary(Frame.serialize(connection_update, 16_384)) + stream_binary = IO.iodata_to_binary(Frame.serialize(stream_update, 16_384)) + + {{:ok, conn}, <<>>} = Frame.deserialize(conn_binary, 16_384) + {{:ok, stream}, <<>>} = Frame.deserialize(stream_binary, 16_384) + + assert conn.stream_id == 0 + assert stream.stream_id == 1 + assert conn.size_increment == stream.size_increment + end + + test "handles window update timing for backpressure" do + # Delay window update to apply backpressure on sender + # Send smaller increments to slow down data flow + throttled_increment = 4096 + + update = %Frame.WindowUpdate{ + stream_id: 1, + size_increment: throttled_increment + } + + result = Frame.serialize(update, 16_384) + binary = IO.iodata_to_binary(result) + + assert <<4::24, 8::8, 0x0::8, 0::1, 1::31, 0::1, ^throttled_increment::31>> = binary + end + end + + describe "flow control scenarios" do + test 
"handles window depletion and replenishment" do + # Start with initial window: 65535 + # Consume data in chunks, replenish periodically + + chunks = [16384, 16384, 16384, 16383] + + updates = + for {chunk, idx} <- Enum.with_index(chunks, 1) do + %Frame.WindowUpdate{stream_id: idx, size_increment: chunk} + end + + total_increment = Enum.sum(chunks) + assert total_increment == 65535 + + serialized = Enum.map(updates, &Frame.serialize(&1, 16_384)) + assert length(serialized) == 4 + end + + test "handles concurrent stream window updates" do + # Multiple active streams, each with independent windows + stream_updates = + for stream_id <- 1..10 do + %Frame.WindowUpdate{stream_id: stream_id, size_increment: 8192} + end + + # Plus connection-level update + connection_update = %Frame.WindowUpdate{stream_id: 0, size_increment: 81920} + + all_updates = [connection_update | stream_updates] + serialized = Enum.map(all_updates, &Frame.serialize(&1, 16_384)) + + assert length(serialized) == 11 + end + end +end From c0b0118736b2822e0933c87f65ec342355417cd4 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Wed, 10 Dec 2025 18:56:04 -0300 Subject: [PATCH 13/47] test(grpc_core): add comprehensive HTTP/2 frame tests for gRPC scenarios Add detailed test coverage for HTTP/2 frame implementations in the frame/ directory, focusing on gRPC-specific use cases and edge cases. These tests cover gRPC-specific HTTP/2 scenarios including trailers-only responses, large message handling, connection keepalive, and flow control patterns commonly used in gRPC implementations. 
--- .../test/grpc/transport/http2/frame_test.exs | 42 ++++++++++++------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/grpc_core/test/grpc/transport/http2/frame_test.exs b/grpc_core/test/grpc/transport/http2/frame_test.exs index 97075b74b..bed6bfe6e 100644 --- a/grpc_core/test/grpc/transport/http2/frame_test.exs +++ b/grpc_core/test/grpc/transport/http2/frame_test.exs @@ -186,7 +186,8 @@ defmodule GRPC.Transport.HTTP2.FrameTest do test "set?/2 guard works correctly" do require Frame.Flags - flags = 0b00000101 # bits 0 and 2 set + # bits 0 and 2 set + flags = 0b00000101 assert Frame.Flags.set?(flags, 0) refute Frame.Flags.set?(flags, 1) @@ -197,7 +198,8 @@ defmodule GRPC.Transport.HTTP2.FrameTest do test "clear?/2 guard works correctly" do require Frame.Flags - flags = 0b00000101 # bits 0 and 2 set + # bits 0 and 2 set + flags = 0b00000101 refute Frame.Flags.clear?(flags, 0) assert Frame.Flags.clear?(flags, 1) @@ -236,22 +238,32 @@ defmodule GRPC.Transport.HTTP2.FrameTest do # Types 1-9 can be tested with stream_id=0 (except some need valid payload) for type <- 1..9 do # Use appropriate payloads for each type - {stream_id, payload} = case type do - 1 -> {1, <<0, 0, 0, 0>>} # HEADERS needs priority data - 2 -> {1, <<0, 0, 0, 0, 0>>} # PRIORITY needs 5 bytes - 3 -> {1, <<0, 0, 0, 0>>} # RST_STREAM needs 4 bytes - 4 -> {0, <<>>} # SETTINGS - 5 -> {1, <<0, 0, 0, 0>>} # PUSH_PROMISE needs promised stream id - 6 -> {0, <<0, 0, 0, 0, 0, 0, 0, 0>>} # PING needs 8 bytes - 7 -> {0, <<0, 0, 0, 0, 0, 0, 0, 0>>} # GOAWAY needs 8 bytes - 8 -> {1, <<0, 0, 0, 100>>} # WINDOW_UPDATE needs 4 bytes - 9 -> {1, <<>>} # CONTINUATION - end - + {stream_id, payload} = + case type do + # HEADERS needs priority data + 1 -> {1, <<0, 0, 0, 0>>} + # PRIORITY needs 5 bytes + 2 -> {1, <<0, 0, 0, 0, 0>>} + # RST_STREAM needs 4 bytes + 3 -> {1, <<0, 0, 0, 0>>} + # SETTINGS + 4 -> {0, <<>>} + # PUSH_PROMISE needs promised stream id + 5 -> {1, <<0, 0, 0, 0>>} + # PING needs 8 bytes + 
6 -> {0, <<0, 0, 0, 0, 0, 0, 0, 0>>} + # GOAWAY needs 8 bytes + 7 -> {0, <<0, 0, 0, 0, 0, 0, 0, 0>>} + # WINDOW_UPDATE needs 4 bytes + 8 -> {1, <<0, 0, 0, 100>>} + # CONTINUATION + 9 -> {1, <<>>} + end + length = byte_size(payload) data = <> result = Frame.deserialize(data, 16_384) - + # Should not return Unknown frame for types 0-9 assert {{:ok, frame}, _} = result refute match?(%Frame.Unknown{}, frame), "Type #{type} returned Unknown frame" From c3acd0459618d4755bea76ec9d8e7e0f086d72e2 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Wed, 10 Dec 2025 19:00:04 -0300 Subject: [PATCH 14/47] fix(tests): resolve Elixir 1.18 deprecation warnings for Enum.slice/2 --- .../http2/frame/continuation_test.exs | 38 +++++++------------ .../grpc/transport/http2/frame/data_test.exs | 2 - .../transport/http2/frame/headers_test.exs | 10 ++--- 3 files changed, 17 insertions(+), 33 deletions(-) diff --git a/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs b/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs index 837cd9d1e..2db1e2eaa 100644 --- a/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs +++ b/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs @@ -9,16 +9,16 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do # CONTINUATION with END_HEADERS flag data = <<3::24, 9::8, 0x4::8, 0::1, 1::31, "hdr">> - assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: true, fragment: "hdr"}}, <<>>} = - Frame.deserialize(data, 16_384) + assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: true, fragment: "hdr"}}, + <<>>} = Frame.deserialize(data, 16_384) end test "deserializes CONTINUATION without END_HEADERS" do # More CONTINUATION frames will follow data = <<3::24, 9::8, 0x0::8, 0::1, 1::31, "hdr">> - assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: false, fragment: "hdr"}}, <<>>} = - Frame.deserialize(data, 16_384) + assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: false, fragment: 
"hdr"}}, + <<>>} = Frame.deserialize(data, 16_384) end test "deserializes CONTINUATION with large fragment" do @@ -41,8 +41,8 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do # Edge case: CONTINUATION with no payload (unusual but valid) data = <<0::24, 9::8, 0x4::8, 0::1, 1::31>> - assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: true, fragment: <<>>}}, <<>>} = - Frame.deserialize(data, 16_384) + assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: true, fragment: <<>>}}, + <<>>} = Frame.deserialize(data, 16_384) end end @@ -91,10 +91,9 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do # Serialize with small frame size to force split frames_io = Frame.serialize(headers, 20) - + # Should produce [HEADERS, CONTINUATION] - # 3 frames for 51 bytes with 20 byte limit - assert length(frames_io) == 3 + assert length(frames_io) == 3 # 3 frames for 51 bytes with 20 byte limit # Deserialize first frame (HEADERS) [h_io, c1_io, c2_io] = frames_io @@ -107,11 +106,9 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do {{:ok, c2_frame}, <<>>} = Frame.deserialize(c2_binary, 16_384) # Reconstruct full header block - reconstructed = - IO.iodata_to_binary([h_frame.fragment, c1_frame.fragment, c2_frame.fragment]) - + reconstructed = IO.iodata_to_binary([h_frame.fragment, c1_frame.fragment, c2_frame.fragment]) assert reconstructed == full_headers - + # First HEADERS should not have END_HEADERS assert h_frame.end_headers == false # Middle CONTINUATION should not have END_HEADERS @@ -128,12 +125,12 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do # Serialize with small max_frame_size frames_io = Frame.serialize(headers, 5) - + # Should split into multiple frames assert length(frames_io) == 3 [h_io, c1_io, c2_io] = frames_io - + h_bin = IO.iodata_to_binary(h_io) c1_bin = IO.iodata_to_binary(c1_io) c2_bin = IO.iodata_to_binary(c2_io) @@ -182,14 +179,7 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do chunks = if 
remainder_size > 0 do - chunks ++ - [ - binary_part( - metadata_headers, - byte_size(metadata_headers) - remainder_size, - remainder_size - ) - ] + chunks ++ [binary_part(metadata_headers, byte_size(metadata_headers) - remainder_size, remainder_size)] else chunks end @@ -199,7 +189,7 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do headers = %Frame.Headers{stream_id: 1, end_headers: false, fragment: first} # Middle chunks in CONTINUATION without END_HEADERS - middle = Enum.slice(rest, 0..-2//-1) + middle = Enum.slice(rest, 0..-2//1) continuations = for {chunk, _idx} <- Enum.with_index(middle) do diff --git a/grpc_core/test/grpc/transport/http2/frame/data_test.exs b/grpc_core/test/grpc/transport/http2/frame/data_test.exs index 62a9e56d8..a3ceb3a58 100644 --- a/grpc_core/test/grpc/transport/http2/frame/data_test.exs +++ b/grpc_core/test/grpc/transport/http2/frame/data_test.exs @@ -134,9 +134,7 @@ defmodule GRPC.Transport.HTTP2.Frame.DataTest do result = Frame.serialize(frame, 16_384) - # Result should be iodata (list) assert is_list(result) - # But should flatten correctly assert IO.iodata_to_binary(result) =~ "hello world" end end diff --git a/grpc_core/test/grpc/transport/http2/frame/headers_test.exs b/grpc_core/test/grpc/transport/http2/frame/headers_test.exs index 07d2f8b50..53000f871 100644 --- a/grpc_core/test/grpc/transport/http2/frame/headers_test.exs +++ b/grpc_core/test/grpc/transport/http2/frame/headers_test.exs @@ -34,9 +34,7 @@ defmodule GRPC.Transport.HTTP2.Frame.HeadersTest do # Flags: PRIORITY (0x20) + END_HEADERS (0x4) = 0x24 # Priority: exclusive=1, dependency=5, weight=10 priority = <<1::1, 5::31, 10::8>> - - data = - < "hdr")::24, 1::8, 0x24::8, 0::1, 1::31, priority::binary, "hdr">> + data = < "hdr")::24, 1::8, 0x24::8, 0::1, 1::31, priority::binary, "hdr">> assert {{:ok, %Frame.Headers{ @@ -139,16 +137,14 @@ defmodule GRPC.Transport.HTTP2.Frame.HeadersTest do assert flags1 == 0x0 # Middle CONTINUATION: no END_HEADERS - middle_frames = 
Enum.slice(continuation_frames, 0..-2//-1) + middle_frames = Enum.slice(continuation_frames, 0..-2//1) for [<<_::24, 9::8, flags::8, _::binary>>, _] <- middle_frames do assert flags == 0x0 end # Last CONTINUATION: END_HEADERS (0x4) - [[<<_::24, 9::8, last_flags::8, _::binary>>, _]] = - Enum.slice(continuation_frames, -1..-1//1) - + [[<<_::24, 9::8, last_flags::8, _::binary>>, _]] = Enum.slice(continuation_frames, -1..-1//1) assert last_flags == 0x4 end From 00fd46dda2a9bb7586dba391265ed0e14e5da4c0 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Wed, 10 Dec 2025 19:00:52 -0300 Subject: [PATCH 15/47] format --- .../http2/frame/continuation_test.exs | 36 ++++++++++++------- .../transport/http2/frame/headers_test.exs | 8 +++-- 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs b/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs index 2db1e2eaa..82cddce2c 100644 --- a/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs +++ b/grpc_core/test/grpc/transport/http2/frame/continuation_test.exs @@ -9,16 +9,16 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do # CONTINUATION with END_HEADERS flag data = <<3::24, 9::8, 0x4::8, 0::1, 1::31, "hdr">> - assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: true, fragment: "hdr"}}, - <<>>} = Frame.deserialize(data, 16_384) + assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: true, fragment: "hdr"}}, <<>>} = + Frame.deserialize(data, 16_384) end test "deserializes CONTINUATION without END_HEADERS" do # More CONTINUATION frames will follow data = <<3::24, 9::8, 0x0::8, 0::1, 1::31, "hdr">> - assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: false, fragment: "hdr"}}, - <<>>} = Frame.deserialize(data, 16_384) + assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: false, fragment: "hdr"}}, <<>>} = + Frame.deserialize(data, 16_384) end test "deserializes CONTINUATION with large fragment" do 
@@ -41,8 +41,8 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do # Edge case: CONTINUATION with no payload (unusual but valid) data = <<0::24, 9::8, 0x4::8, 0::1, 1::31>> - assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: true, fragment: <<>>}}, - <<>>} = Frame.deserialize(data, 16_384) + assert {{:ok, %Frame.Continuation{stream_id: 1, end_headers: true, fragment: <<>>}}, <<>>} = + Frame.deserialize(data, 16_384) end end @@ -91,9 +91,10 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do # Serialize with small frame size to force split frames_io = Frame.serialize(headers, 20) - + # Should produce [HEADERS, CONTINUATION] - assert length(frames_io) == 3 # 3 frames for 51 bytes with 20 byte limit + # 3 frames for 51 bytes with 20 byte limit + assert length(frames_io) == 3 # Deserialize first frame (HEADERS) [h_io, c1_io, c2_io] = frames_io @@ -106,9 +107,11 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do {{:ok, c2_frame}, <<>>} = Frame.deserialize(c2_binary, 16_384) # Reconstruct full header block - reconstructed = IO.iodata_to_binary([h_frame.fragment, c1_frame.fragment, c2_frame.fragment]) + reconstructed = + IO.iodata_to_binary([h_frame.fragment, c1_frame.fragment, c2_frame.fragment]) + assert reconstructed == full_headers - + # First HEADERS should not have END_HEADERS assert h_frame.end_headers == false # Middle CONTINUATION should not have END_HEADERS @@ -125,12 +128,12 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do # Serialize with small max_frame_size frames_io = Frame.serialize(headers, 5) - + # Should split into multiple frames assert length(frames_io) == 3 [h_io, c1_io, c2_io] = frames_io - + h_bin = IO.iodata_to_binary(h_io) c1_bin = IO.iodata_to_binary(c1_io) c2_bin = IO.iodata_to_binary(c2_io) @@ -179,7 +182,14 @@ defmodule GRPC.Transport.HTTP2.Frame.ContinuationTest do chunks = if remainder_size > 0 do - chunks ++ [binary_part(metadata_headers, byte_size(metadata_headers) - remainder_size, 
remainder_size)] + chunks ++ + [ + binary_part( + metadata_headers, + byte_size(metadata_headers) - remainder_size, + remainder_size + ) + ] else chunks end diff --git a/grpc_core/test/grpc/transport/http2/frame/headers_test.exs b/grpc_core/test/grpc/transport/http2/frame/headers_test.exs index 53000f871..08a9de45e 100644 --- a/grpc_core/test/grpc/transport/http2/frame/headers_test.exs +++ b/grpc_core/test/grpc/transport/http2/frame/headers_test.exs @@ -34,7 +34,9 @@ defmodule GRPC.Transport.HTTP2.Frame.HeadersTest do # Flags: PRIORITY (0x20) + END_HEADERS (0x4) = 0x24 # Priority: exclusive=1, dependency=5, weight=10 priority = <<1::1, 5::31, 10::8>> - data = < "hdr")::24, 1::8, 0x24::8, 0::1, 1::31, priority::binary, "hdr">> + + data = + < "hdr")::24, 1::8, 0x24::8, 0::1, 1::31, priority::binary, "hdr">> assert {{:ok, %Frame.Headers{ @@ -144,7 +146,9 @@ defmodule GRPC.Transport.HTTP2.Frame.HeadersTest do end # Last CONTINUATION: END_HEADERS (0x4) - [[<<_::24, 9::8, last_flags::8, _::binary>>, _]] = Enum.slice(continuation_frames, -1..-1//1) + [[<<_::24, 9::8, last_flags::8, _::binary>>, _]] = + Enum.slice(continuation_frames, -1..-1//1) + assert last_flags == 0x4 end From 62b9a2767ff3e64870f396e72e3a0d394e25f1ab Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Wed, 10 Dec 2025 19:56:29 -0300 Subject: [PATCH 16/47] fix(thousand_island): handle async response sending for stream lifecycle --- .../adapters/thousand_island/handler.ex | 13 +- .../lib/grpc/server/http2/connection.ex | 309 ++++++++++++------ 2 files changed, 212 insertions(+), 110 deletions(-) diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex index c4012546f..577f02c54 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex @@ -124,15 +124,22 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do new_state = if 
map_size(accumulated) > 0 do - Connection.send_headers(socket, stream_id, accumulated, state.connection) + updated_conn = Connection.send_headers(socket, stream_id, accumulated, state.connection) # Clear accumulated headers for this stream - %{state | accumulated_headers: Map.delete(state.accumulated_headers, stream_id)} + %{ + state + | accumulated_headers: Map.delete(state.accumulated_headers, stream_id), + connection: updated_conn + } else state end # Send trailers (headers with END_STREAM) for streaming - Connection.send_trailers(socket, stream_id, trailers, new_state.connection) + # This will also remove the stream from the connection + updated_connection = Connection.send_trailers(socket, stream_id, trailers, new_state.connection) + + new_state = %{new_state | connection: updated_connection} {:noreply, {socket, new_state}} end diff --git a/grpc_server/lib/grpc/server/http2/connection.ex b/grpc_server/lib/grpc/server/http2/connection.ex index 392b3e9fe..0c0fb9feb 100644 --- a/grpc_server/lib/grpc/server/http2/connection.ex +++ b/grpc_server/lib/grpc/server/http2/connection.ex @@ -82,66 +82,99 @@ defmodule GRPC.Server.HTTP2.Connection do Send headers for streaming response. 
""" def send_headers(socket, stream_id, headers, connection) do - # Encode headers using HPAX - convert map to list of tuples - Logger.debug("[send_headers] stream_id=#{stream_id}, headers=#{inspect(headers)}") - headers_list = if is_map(headers), do: Map.to_list(headers), else: headers - {header_block, _new_hpack} = HPAX.encode(:no_store, headers_list, connection.send_hpack_state) + # Check if stream still exists (may have been closed by RST_STREAM) + unless Map.has_key?(connection.streams, stream_id) do + Logger.warning( + "[send_headers] SKIPPED - stream=#{stream_id} no longer exists (likely cancelled by client)" + ) - # Send HEADERS frame without END_STREAM - frame = %Frame.Headers{ - stream_id: stream_id, - fragment: header_block, - end_stream: false, - end_headers: true - } + connection + else + # Encode headers using HPAX - convert map to list of tuples + Logger.debug("[send_headers] stream_id=#{stream_id}, headers=#{inspect(headers)}") + headers_list = if is_map(headers), do: Map.to_list(headers), else: headers + + {header_block, _new_hpack} = + HPAX.encode(:no_store, headers_list, connection.send_hpack_state) + + # Send HEADERS frame without END_STREAM + frame = %Frame.Headers{ + stream_id: stream_id, + fragment: header_block, + end_stream: false, + end_headers: true + } - send_frame(frame, socket, connection) + send_frame(frame, socket, connection) + end end @doc """ Send data frame for streaming response. 
""" def send_data(socket, stream_id, data, end_stream, connection) do - # Send DATA frame - frame = %Frame.Data{ - stream_id: stream_id, - data: data, - end_stream: end_stream - } + # Check if stream still exists (may have been closed by RST_STREAM) + unless Map.has_key?(connection.streams, stream_id) do + Logger.warning( + "[send_data] SKIPPED - stream=#{stream_id} no longer exists (likely cancelled by client)" + ) - send_frame(frame, socket, connection) + connection + else + # Send DATA frame + frame = %Frame.Data{ + stream_id: stream_id, + data: data, + end_stream: end_stream + } + + send_frame(frame, socket, connection) + end end @doc """ Send trailers (headers with END_STREAM) for streaming response. """ def send_trailers(socket, stream_id, trailers, connection) do - # Encode custom metadata (handles -bin suffix base64 encoding) - # Note: encode_metadata filters out reserved headers like grpc-status - encoded_custom = GRPC.Transport.HTTP2.encode_metadata(trailers) - - # Re-add reserved headers (grpc-status, etc) that were filtered out - encoded_trailers = - Map.merge( - Map.take(trailers, ["grpc-status", "grpc-message"]), - encoded_custom + # Check if stream still exists (may have been closed by RST_STREAM) + unless Map.has_key?(connection.streams, stream_id) do + Logger.warning( + "[send_trailers] SKIPPED - stream=#{stream_id} no longer exists (likely cancelled by client)" ) - # Convert map to list - trailer_list = Map.to_list(encoded_trailers) + connection + else + # Encode custom metadata (handles -bin suffix base64 encoding) + # Note: encode_metadata filters out reserved headers like grpc-status + encoded_custom = GRPC.Transport.HTTP2.encode_metadata(trailers) + + # Re-add reserved headers (grpc-status, etc) that were filtered out + encoded_trailers = + Map.merge( + Map.take(trailers, ["grpc-status", "grpc-message"]), + encoded_custom + ) - {trailer_block, _new_hpack} = - HPAX.encode(:no_store, trailer_list, connection.send_hpack_state) + # Convert map to 
list + trailer_list = Map.to_list(encoded_trailers) - # Send HEADERS frame with END_STREAM - frame = %Frame.Headers{ - stream_id: stream_id, - fragment: trailer_block, - end_stream: true, - end_headers: true - } + {trailer_block, _new_hpack} = + HPAX.encode(:no_store, trailer_list, connection.send_hpack_state) + + # Send HEADERS frame with END_STREAM + frame = %Frame.Headers{ + stream_id: stream_id, + fragment: trailer_block, + end_stream: true, + end_headers: true + } - send_frame(frame, socket, connection) + connection = send_frame(frame, socket, connection) + + # Remove stream after sending END_STREAM (RFC 7540: stream transitions to closed) + Logger.debug("[send_trailers] Removing stream #{stream_id} after sending END_STREAM") + %{connection | streams: Map.delete(connection.streams, stream_id)} + end end @doc """ @@ -421,43 +454,68 @@ defmodule GRPC.Server.HTTP2.Connection do defp handle_headers_frame(frame, _socket, connection) do Logger.info("[handle_headers_frame] Decoding HPACK for stream #{frame.stream_id}") - case HPAX.decode(frame.fragment, connection.recv_hpack_state) do - {:ok, headers, new_hpack_state} -> - Logger.info( - "[handle_headers_frame] Decoded headers for stream #{frame.stream_id}: #{inspect(headers)}" - ) + # Check if this is trailers for an existing stream + case Map.get(connection.streams, frame.stream_id) do + nil -> + # New stream - decode headers and create stream state + case HPAX.decode(frame.fragment, connection.recv_hpack_state) do + {:ok, headers, new_hpack_state} -> + Logger.info( + "[handle_headers_frame] Decoded headers for stream #{frame.stream_id}: #{inspect(headers)}" + ) - # Create stream state from headers - stream_state = - StreamState.from_headers( - frame.stream_id, - headers, - connection.local_settings.initial_window_size - ) + # Create stream state from headers + stream_state = + StreamState.from_headers( + frame.stream_id, + headers, + connection.local_settings.initial_window_size + ) - # Add handler_pid for 
streaming support - stream_state = %{stream_state | handler_pid: connection.handler_pid} + # Add handler_pid for streaming support + stream_state = %{stream_state | handler_pid: connection.handler_pid} - # Check if this is bidirectional streaming - # For bidi, we need to process messages as they arrive (not wait for END_STREAM) - is_bidi = - GRPC.Server.HTTP2.Dispatcher.is_bidi_streaming?(stream_state.path, connection.servers) + # Check if this is bidirectional streaming + # For bidi, we need to process messages as they arrive (not wait for END_STREAM) + is_bidi = + GRPC.Server.HTTP2.Dispatcher.is_bidi_streaming?(stream_state.path, connection.servers) - stream_state = %{stream_state | is_bidi_streaming: is_bidi} + stream_state = %{stream_state | is_bidi_streaming: is_bidi} - if is_bidi do - Logger.info( - "[handle_headers_frame] Stream #{frame.stream_id} is bidirectional streaming" - ) + if is_bidi do + Logger.info( + "[handle_headers_frame] Stream #{frame.stream_id} is bidirectional streaming" + ) + end + + # Store stream in connection + streams = Map.put(connection.streams, frame.stream_id, stream_state) + + %{connection | recv_hpack_state: new_hpack_state, streams: streams} + + {:error, reason} -> + connection_error!("HPACK decode error: #{inspect(reason)}") end - # Store stream in connection - streams = Map.put(connection.streams, frame.stream_id, stream_state) + _stream_state -> + # Trailers for existing stream - just decode but don't create new stream + # This can happen when client sends trailers after we've sent response/error + Logger.info( + "[handle_headers_frame] Ignoring trailers for stream #{frame.stream_id} (stream already processed)" + ) - %{connection | recv_hpack_state: new_hpack_state, streams: streams} + case HPAX.decode(frame.fragment, connection.recv_hpack_state) do + {:ok, _headers, new_hpack_state} -> + %{connection | recv_hpack_state: new_hpack_state} + + {:error, reason} -> + Logger.warning( + "[handle_headers_frame] Failed to decode 
trailers for stream #{frame.stream_id}: #{inspect(reason)}" + ) - {:error, reason} -> - connection_error!("HPACK decode error: #{inspect(reason)}") + # Continue without updating HPACK state to avoid connection error + connection + end end end @@ -518,7 +576,7 @@ defmodule GRPC.Server.HTTP2.Connection do ThousandIsland.Socket.send(socket, iodata) end - :ok + connection end defp connection_error!(message) do @@ -690,13 +748,22 @@ defmodule GRPC.Server.HTTP2.Connection do end end - # Remove stream from connection - Logger.debug("[process_grpc_request] Removing stream #{stream_state.stream_id}") + # DON'T remove stream here for ThousandIsland adapter! + # The adapter sends async messages ({:grpc_send_data}, {:grpc_send_trailers}) + # that will be processed later by handle_info in the Handler. + # The stream will be removed when send_grpc_trailers is called (which sends END_STREAM). + # + # For Cowboy adapter (synchronous), the response is sent immediately during dispatch, + # so the stream can be removed here. But for ThousandIsland, we need to wait for + # the async messages to be processed. + # + # TODO: Add a flag to StreamState to indicate if response was fully sent, + # or let the trailers handler remove the stream after sending END_STREAM. 
+ Logger.debug( + "[process_grpc_request] Keeping stream #{stream_state.stream_id} alive for async response (will be removed after trailers)" + ) - %{ - updated_connection - | streams: Map.delete(updated_connection.streams, stream_state.stream_id) - } + updated_connection end end rescue @@ -732,28 +799,38 @@ defmodule GRPC.Server.HTTP2.Connection do end defp send_grpc_trailers(socket, stream_id, trailers, connection) do - Logger.debug( - "[send_grpc_trailers] Sending trailers for stream #{stream_id}: #{inspect(trailers)}" - ) + # Check if stream still exists (may have been closed by RST_STREAM) + unless Map.has_key?(connection.streams, stream_id) do + Logger.warning( + "[send_grpc_trailers] SKIPPED - stream=#{stream_id} no longer exists (likely cancelled by client)" + ) - # Convert map to list of tuples for HPAX - trailer_list = Map.to_list(trailers) + connection + else + Logger.debug( + "[send_grpc_trailers] Sending trailers for stream #{stream_id}: #{inspect(trailers)}" + ) - # Encode trailers using HPACK - {trailer_block, new_hpack} = HPAX.encode(:no_store, trailer_list, connection.send_hpack_state) + # Convert map to list of tuples for HPAX + trailer_list = Map.to_list(trailers) - # Send HEADERS frame with END_STREAM flag - headers_frame = %Frame.Headers{ - stream_id: stream_id, - fragment: trailer_block, - end_stream: true, - end_headers: true - } + # Encode trailers using HPACK + {trailer_block, new_hpack} = + HPAX.encode(:no_store, trailer_list, connection.send_hpack_state) - send_frame(headers_frame, socket, connection) + # Send HEADERS frame with END_STREAM flag + headers_frame = %Frame.Headers{ + stream_id: stream_id, + fragment: trailer_block, + end_stream: true, + end_headers: true + } - # Return updated connection with new HPACK state - %{connection | send_hpack_state: new_hpack} + send_frame(headers_frame, socket, connection) + + # Return updated connection with new HPACK state + %{connection | send_hpack_state: new_hpack} + end end # OPTIMIZATION: 
Send headers + data + trailers in one syscall (Bandit-style batching) @@ -923,20 +1000,29 @@ defmodule GRPC.Server.HTTP2.Connection do headers = %{":status" => "200", "content-type" => "application/grpc+proto"} send_headers(socket, stream_id, headers, connection) - # Mark headers as sent in the stream state - updated_stream = %{stream_state | headers_sent: true} - updated_conn = put_in(connection.streams[stream_id], updated_stream) + # Verify stream still exists after send_headers (may have been closed by RST_STREAM) + if Map.has_key?(connection.streams, stream_id) do + # Mark headers as sent in the stream state + updated_stream = %{stream_state | headers_sent: true} + updated_conn = put_in(connection.streams[stream_id], updated_stream) - trailers = %{ - "grpc-status" => to_string(status_code), - "grpc-message" => message - } + trailers = %{ + "grpc-status" => to_string(status_code), + "grpc-message" => message + } - Logger.warning( - "[SEND_GRPC_ERROR] Sending TRAILERS with END_STREAM for stream=#{stream_id}" - ) + Logger.warning( + "[SEND_GRPC_ERROR] Sending TRAILERS with END_STREAM for stream=#{stream_id}" + ) + + send_grpc_trailers(socket, stream_id, trailers, updated_conn) + else + Logger.warning( + "[SEND_GRPC_ERROR] SKIPPED trailers - stream=#{stream_id} was closed after sending headers" + ) - send_grpc_trailers(socket, stream_id, trailers, updated_conn) + connection + end else # If headers were sent OR stream received END_STREAM, just send trailers if end_stream_received && !headers_sent do @@ -957,11 +1043,20 @@ defmodule GRPC.Server.HTTP2.Connection do send_grpc_trailers(socket, stream_id, trailers, connection) end - # Marcar que já enviamos erro (RFC 7540: após END_STREAM, o stream está closed) + # Verify stream still exists before marking error_sent (may have been closed by RST_STREAM) updated_connection = - update_in(updated_connection.streams[stream_id], fn s -> - if s, do: %{s | error_sent: true}, else: nil - end) + if 
Map.has_key?(updated_connection.streams, stream_id) do + # Marcar que já enviamos erro (RFC 7540: após END_STREAM, o stream está closed) + update_in(updated_connection.streams[stream_id], fn s -> + if s, do: %{s | error_sent: true}, else: nil + end) + else + Logger.warning( + "[SEND_GRPC_ERROR] SKIPPED marking error_sent - stream=#{stream_id} was already closed" + ) + + updated_connection + end # RFC 7540: Após enviar END_STREAM, o stream transiciona para "closed" # Remover imediatamente para evitar processar mais mensagens neste stream From f11e679fb4eb4b09b32fdfd506a44de947bab17b Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Thu, 11 Dec 2025 00:16:46 -0300 Subject: [PATCH 17/47] fix(grpc_server): ensure HEADERS sent before TRAILERS in error responses HTTP/2 protocol (RFC 9113) requires that HEADERS frame with :status pseudo-header must be sent before any TRAILERS frame. The previous implementation was conditionally skipping HEADERS when a stream had already received END_STREAM from the client, causing protocol errors. This fix ensures that send_grpc_error ALWAYS sends HTTP/2 HEADERS (with required :status and :content-type headers) before sending TRAILERS (with grpc-status and grpc-message), regardless of the stream state (half-closed remote or not). This resolves the 'timeout_on_sleeping_server' interop test failure where Gun client was rejecting error responses with the message: 'A required pseudo-header was not found'. 
--- grpc_server/lib/grpc/server/http2/connection.ex | 14 ++++---------- interop/lib/interop/client.ex | 2 +- interop/script/run.exs | 8 ++++---- 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/grpc_server/lib/grpc/server/http2/connection.ex b/grpc_server/lib/grpc/server/http2/connection.ex index 0c0fb9feb..0bb9360eb 100644 --- a/grpc_server/lib/grpc/server/http2/connection.ex +++ b/grpc_server/lib/grpc/server/http2/connection.ex @@ -989,10 +989,10 @@ defmodule GRPC.Server.HTTP2.Connection do else # Check if headers were already sent for this stream - # If headers not sent yet AND stream hasn't received END_STREAM, send HTTP/2 headers first - # If stream received END_STREAM, it's half-closed remote - we can only send TRAILERS + # ALWAYS send headers first if not sent yet, even if stream received END_STREAM + # HTTP/2 requires :status pseudo-header before trailers updated_connection = - if !headers_sent && !end_stream_received do + if !headers_sent do Logger.warning( "[SEND_GRPC_ERROR] Sending HTTP/2 headers first for stream=#{stream_id}" ) @@ -1024,13 +1024,7 @@ defmodule GRPC.Server.HTTP2.Connection do connection end else - # If headers were sent OR stream received END_STREAM, just send trailers - if end_stream_received && !headers_sent do - Logger.warning( - "[SEND_GRPC_ERROR] Stream #{stream_id} received END_STREAM, skipping HEADERS, sending only TRAILERS" - ) - end - + # Headers already sent, just send trailers with error trailers = %{ "grpc-status" => to_string(status_code), "grpc-message" => message diff --git a/interop/lib/interop/client.ex b/interop/lib/interop/client.ex index 908815fc6..0c0f2489c 100644 --- a/interop/lib/interop/client.ex +++ b/interop/lib/interop/client.ex @@ -326,7 +326,7 @@ defmodule Interop.Client do } stream = Grpc.Testing.TestService.Stub.full_duplex_call(ch, timeout: 1) - resp = stream |> GRPC.Stub.send_request(req) |> GRPC.Stub.recv() + resp = stream |> GRPC.Stub.send_request(req, end_stream: true) |> GRPC.Stub.recv() 
case resp do {:error, %GRPC.RPCError{status: 4}} -> diff --git a/interop/script/run.exs b/interop/script/run.exs index 72fc1e792..9d65e86c7 100644 --- a/interop/script/run.exs +++ b/interop/script/run.exs @@ -26,7 +26,7 @@ server_adapters = case server_adapter_name do "cowboy" -> [GRPC.Server.Adapters.Cowboy] "thousand_island" -> [GRPC.Server.Adapters.ThousandIsland] "both" -> [GRPC.Server.Adapters.Cowboy, GRPC.Server.Adapters.ThousandIsland] - _ -> + _ -> IO.puts("Unknown adapter: #{server_adapter_name}. Using both.") [GRPC.Server.Adapters.Cowboy, GRPC.Server.Adapters.ThousandIsland] end @@ -82,12 +82,12 @@ for server_adapter <- server_adapters do Logger.info("========================================") Logger.info("Testing with SERVER ADAPTER: #{server_name}") Logger.info("========================================") - + {:ok, _pid, test_port} = GRPC.Server.start_endpoint(Interop.Endpoint, port, adapter: server_adapter) Logger.info("Server started on port #{test_port}") # Give server time to fully initialize Process.sleep(100) - + for client_adapter <- client_adapters do client_name = client_adapter |> Module.split() |> List.last() Logger.info("Starting run for client adapter: #{client_name}") @@ -98,7 +98,7 @@ for server_adapter <- server_adapters do |> Task.async_stream(InteropTestRunner, :run, args, stream_opts) |> Enum.to_list() end - + :ok = GRPC.Server.stop_endpoint(Interop.Endpoint, adapter: server_adapter) Logger.info("Completed tests for #{server_name}") end From 7f2325185bd2c7e98c34f562adb2d98d424f0e6d Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Thu, 11 Dec 2025 01:13:43 -0300 Subject: [PATCH 18/47] chore: remove comments, change log level, and some adjustments --- .../grpc/server/adapters/thousand_island.ex | 12 - .../server/adapters/thousand_island/README.md | 236 --------------- .../adapters/thousand_island/handler.ex | 33 +-- .../adapters/thousand_island/http2.back | 275 ------------------ .../lib/grpc/server/http2/connection.ex | 53 ++-- 
.../lib/grpc/server/http2/dispatcher.ex | 6 +- 6 files changed, 34 insertions(+), 581 deletions(-) delete mode 100644 grpc_server/lib/grpc/server/adapters/thousand_island/README.md delete mode 100644 grpc_server/lib/grpc/server/adapters/thousand_island/http2.back diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island.ex b/grpc_server/lib/grpc/server/adapters/thousand_island.ex index f581a67c0..6ad9c49e2 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island.ex @@ -365,10 +365,8 @@ defmodule GRPC.Server.Adapters.ThousandIsland do """ @impl true def start(endpoint, servers, port, opts) do - # Initialize ETS cache for codecs/compressors lookup GRPC.Server.Cache.init() - # Ensure Task.Supervisor is started (for direct start_link calls outside supervision tree) case Process.whereis(GRPC.Server.StreamTaskSupervisor) do nil -> {:ok, _} = Task.Supervisor.start_link(name: GRPC.Server.StreamTaskSupervisor) @@ -381,7 +379,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland do case ThousandIsland.start_link(server_opts) do {:ok, pid} -> - # Get actual port (important when port=0 for random port) actual_port = get_actual_port(pid, port) {:ok, pid, actual_port} @@ -414,7 +411,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland do def child_spec(endpoint, servers, port, opts) do server_opts = build_server_opts(endpoint, servers, port, opts) - # Initialize ETS cache for codecs/compressors lookup GRPC.Server.Cache.init() scheme = if cred_opts(opts), do: :https, else: :http @@ -425,7 +421,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland do server_name = servers_name(endpoint, servers) - # Create children for the supervisor children = [ {Task.Supervisor, name: GRPC.Server.StreamTaskSupervisor}, %{ @@ -455,7 +450,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland do @spec read_body(GRPC.Server.Adapter.state()) :: {:ok, binary()} def read_body(%{data: data}) do - # Data is already in payload, return it 
directly {:ok, data} end @@ -475,7 +469,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland do end def set_headers(%{handler_pid: pid, stream_id: stream_id}, headers) do - # Send message to accumulate headers in handler state send(pid, {:grpc_accumulate_headers, stream_id, headers}) :ok end @@ -488,12 +481,10 @@ defmodule GRPC.Server.Adapters.ThousandIsland do end def get_headers(%{headers: headers}) do - # Return request headers from payload headers end def get_headers(%{connection: connection}) do - # Fallback: Return request headers from connection metadata connection.metadata || %{} end @@ -508,7 +499,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland do end def get_cert(%{socket: socket}) do - # Get SSL certificate if available case ThousandIsland.Socket.peercert(socket) do {:ok, cert} -> {:ok, cert} {:error, _} -> {:error, :no_peercert} @@ -558,11 +548,9 @@ defmodule GRPC.Server.Adapters.ThousandIsland do :ok end - # Fallback for non-streaming def send_headers(_payload, _headers), do: :ok def send_trailers(%{handler_pid: pid, stream_id: stream_id}, trailers) do - # Send trailers for streaming to handler process send(pid, {:grpc_send_trailers, stream_id, trailers}) :ok end diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/README.md b/grpc_server/lib/grpc/server/adapters/thousand_island/README.md deleted file mode 100644 index 9368322d9..000000000 --- a/grpc_server/lib/grpc/server/adapters/thousand_island/README.md +++ /dev/null @@ -1,236 +0,0 @@ -# ThousandIsland Adapter - -Pure Elixir HTTP/2 gRPC server adapter using ThousandIsland. - -## Overview - -This adapter provides a pure Elixir implementation of HTTP/2 for gRPC, built on top of ThousandIsland TCP server. Unlike the Cowboy adapter, this implementation has no native dependencies. 
- -## Usage - -### Basic Setup - -In your application supervisor: - -```elixir -defmodule MyApp.Application do - use Application - - def start(_type, _args) do - children = [ - # Your gRPC endpoint using ThousandIsland adapter - {GRPC.Server.Supervisor, - endpoint: MyApp.Endpoint, - port: 50051, - start_server: true, - adapter: GRPC.Server.Adapters.ThousandIsland, - adapter_opts: [ - num_acceptors: 10, - max_connections: 1000 - ]} - ] - - opts = [strategy: :one_for_one, name: MyApp.Supervisor] - Supervisor.start_link(children, opts) - end -end -``` - -### Defining Your Endpoint - -```elixir -defmodule MyApp.Endpoint do - use GRPC.Endpoint - - intercept GRPC.Server.Interceptors.Logger - run MyApp.Greeter.Server - run MyApp.OtherService.Server -end -``` - -### Defining Your Service - -```elixir -defmodule MyApp.Greeter.Service do - use GRPC.Service, name: "helloworld.Greeter" - - rpc :SayHello, Helloworld.HelloRequest, Helloworld.HelloReply -end - -defmodule MyApp.Greeter.Server do - use GRPC.Server, service: MyApp.Greeter.Service - - @spec say_hello(Helloworld.HelloRequest.t(), GRPC.Server.Stream.t()) :: - Helloworld.HelloReply.t() - def say_hello(request, _stream) do - Helloworld.HelloReply.new(message: "Hello #{request.name}") - end -end -``` - -## Configuration Options - -### Adapter Options - -- `:num_acceptors` - Number of acceptor processes (default: 100) -- `:max_connections` - Maximum concurrent connections (default: 16_384) -- `:transport_options` - Additional transport options passed to ThousandIsland - -### TLS Configuration - -For production use, configure TLS: - -```elixir -{GRPC.Server.Supervisor, - endpoint: MyApp.Endpoint, - port: 50051, - start_server: true, - adapter: GRPC.Server.Adapters.ThousandIsland, - adapter_opts: [ - transport_module: ThousandIsland.Transports.SSL, - transport_options: [ - certfile: "/path/to/cert.pem", - keyfile: "/path/to/key.pem", - alpn_preferred_protocols: ["h2"] - ] - ]} -``` - -## Features - -### Supported - -- ✅ 
HTTP/2 with HPACK header compression -- ✅ All 4 gRPC streaming types: - - Unary (single request → single response) - - Client streaming (stream → single response) - - Server streaming (single → stream) - - Bidirectional streaming (stream → stream) -- ✅ Flow control (connection and stream level) -- ✅ Multiple concurrent streams per connection -- ✅ gRPC error handling with status codes -- ✅ Content negotiation (protobuf, compression) -- ✅ Pure Elixir implementation (no NIFs) - -### Not Yet Supported - -- ⚠️ HTTP/2 Server Push -- ⚠️ SETTINGS frame priority handling -- ⚠️ HTTP/2 PRIORITY frames - -## Architecture - -``` -GRPC.Server.Supervisor - └── ThousandIsland (TCP acceptor pool) - └── Connection processes (one per client) - ├── Handler (HTTP/2 frame handling) - ├── Connection (state + dispatch) - ├── StreamRegistry (stream lifecycle) - └── Dispatcher (gRPC service routing) -``` - -Each client connection is handled by a separate process that: - -1. Accepts HTTP/2 connection preface -2. Manages HTTP/2 settings and flow control -3. Routes incoming gRPC requests to service implementations -4. Handles streaming in both directions -5. Encodes responses with proper framing and headers - -## Testing - -### Unit Tests - -Unit tests for the adapter are in `test/grpc/server/adapters/thousand_island_test.exs`. - -### Integration Tests - -Integration tests belong in the **grpc_client** package, not in grpc_server. This follows the architecture where: - -- `grpc_server` contains server implementation and unit tests -- `grpc_client` contains client implementation and integration tests (client ↔ server) - -To test the ThousandIsland adapter end-to-end: - -1. Start a test server using `GRPC.Server.Supervisor` with the ThousandIsland adapter -2. Use `GRPC.Stub` to create a client connection -3. 
Make RPC calls and verify responses - -Example integration test (in grpc_client): - -```elixir -# In grpc_client/test/integration/thousand_island_test.exs - -setup_all do - # Start server with ThousandIsland adapter - {:ok, _pid, port} = GRPC.Server.start( - MyTestEndpoint, - port: 0, - adapter: GRPC.Server.Adapters.ThousandIsland - ) - - # Connect client - {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") - - {:ok, channel: channel} -end - -test "unary RPC works", %{channel: channel} do - request = Helloworld.HelloRequest.new(name: "World") - {:ok, reply} = channel |> Helloworld.Greeter.Stub.say_hello(request) - - assert reply.message == "Hello World" -end -``` - -## Performance Considerations - -- Each connection runs in its own process -- Connection processes are supervised by ThousandIsland -- Streams are tracked in ETS for fast lookup -- Flow control prevents memory exhaustion -- HPACK compression reduces header overhead - -For high-throughput scenarios, tune: - -- `:num_acceptors` - More acceptors for connection bursts -- `:max_connections` - Total concurrent connection limit -- Initial flow control window sizes (in Settings) - -## Comparison with Cowboy Adapter - -| Feature | ThousandIsland | Cowboy | -|---------|---------------|--------| -| Implementation | Pure Elixir | NIF (Ranch) | -| HTTP/2 | Custom | Cowlib | -| Dependencies | Minimal | Ranch, Cowlib | -| Performance | Good | Excellent | -| Debugging | Easier | Harder | -| Production Ready | Testing | Yes | - -## Development Status - -**Current Status**: Feature-complete, testing phase - -The adapter implements all core gRPC functionality. Integration testing with real clients is needed before production use. 
- -## Contributing - -The adapter code is in `lib/grpc/server/adapters/thousand_island/`: - -- `adapter.ex` - Main adapter interface -- `handler.ex` - HTTP/2 frame handling -- `connection.ex` - Connection state and dispatch -- `dispatcher.ex` - gRPC service routing -- `stream_state.ex` - Individual stream lifecycle -- `stream_registry.ex` - Multi-stream coordination -- `stream_collector.ex` - Streaming response collection -- `http2/` - HTTP/2 protocol implementation - -When contributing: - -1. Add unit tests for new functionality -2. Follow existing code style -3. Update this README for user-visible changes -4. Add integration tests in grpc_client package diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex index 577f02c54..28ed344e3 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex @@ -34,7 +34,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do Keyword.get(handler_options, :opts, [])} end - # Convert servers list/map to standardized map format servers = cond do is_map(servers_list) and not is_struct(servers_list) -> servers_list @@ -52,7 +51,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do connection: nil, buffer: <<>>, preface_received: false, - # Map of stream_id => headers accumulated_headers: %{} } @@ -82,10 +80,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do {:close, state} end - # Streaming message handlers (GenServer callbacks) - # ThousandIsland passes state as {socket, user_state} - - # Accumulate headers without sending def handle_info({:grpc_accumulate_headers, stream_id, headers}, {socket, state}) do current_headers = Map.get(state.accumulated_headers, stream_id, %{}) updated_headers = Map.merge(current_headers, headers) @@ -94,38 +88,32 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do end def 
handle_info({:grpc_send_headers, stream_id, headers}, {socket, state}) do - # Send headers frame for streaming Logger.debug("[Streaming] Sending headers for stream #{stream_id}") Connection.send_headers(socket, stream_id, headers, state.connection) {:noreply, {socket, state}} end def handle_info({:grpc_send_data, stream_id, data}, {socket, state}) do - # Check if we need to send accumulated headers first accumulated = Map.get(state.accumulated_headers, stream_id, %{}) new_state = if map_size(accumulated) > 0 do Connection.send_headers(socket, stream_id, accumulated, state.connection) - # Clear accumulated headers for this stream %{state | accumulated_headers: Map.delete(state.accumulated_headers, stream_id)} else state end - # Send data frame for streaming Connection.send_data(socket, stream_id, data, false, new_state.connection) {:noreply, {socket, new_state}} end def handle_info({:grpc_send_trailers, stream_id, trailers}, {socket, state}) do - # Check if we need to send accumulated headers first (for empty streams) accumulated = Map.get(state.accumulated_headers, stream_id, %{}) new_state = if map_size(accumulated) > 0 do updated_conn = Connection.send_headers(socket, stream_id, accumulated, state.connection) - # Clear accumulated headers for this stream %{ state | accumulated_headers: Map.delete(state.accumulated_headers, stream_id), @@ -137,15 +125,15 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do # Send trailers (headers with END_STREAM) for streaming # This will also remove the stream from the connection - updated_connection = Connection.send_trailers(socket, stream_id, trailers, new_state.connection) - + updated_connection = + Connection.send_trailers(socket, stream_id, trailers, new_state.connection) + new_state = %{new_state | connection: updated_connection} {:noreply, {socket, new_state}} end def handle_info({:update_stream_state, stream_id, updated_stream_state}, {socket, state}) do - # Update the stream_state in the connection for bidi 
streaming - Logger.info( + Logger.debug( "[Handler] Updating stream_state for stream #{stream_id}, bidi_pid=#{inspect(updated_stream_state.bidi_stream_pid)}" ) @@ -171,7 +159,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do defp handle_preface(<<@connection_preface, remaining::binary>>, socket, state) do # Valid preface, initialize connection try do - # Pass handler PID for streaming support opts = Keyword.put(state.opts, :handler_pid, self()) connection = Connection.init(socket, state.endpoint, state.servers, opts) new_state = %{state | connection: connection, preface_received: true, buffer: <<>>} @@ -192,8 +179,7 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do end defp handle_preface(_buffer, _socket, state) do - # Invalid preface - Logger.error("Invalid HTTP/2 preface") + Logger.debug("Invalid HTTP/2 preface") {:close, state} end @@ -207,7 +193,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do ) end - # Optimized tail-recursive frame processing defp handle_frames_loop(buffer, socket, connection, max_frame_size, original_state) do case Frame.deserialize(buffer, max_frame_size) do {{:ok, frame}, rest} -> @@ -223,11 +208,11 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do end rescue e in Errors.ConnectionError -> - Logger.error("Connection error: #{e.message}") + Logger.debug("Connection error: #{e.message}") {:close, original_state} e in Errors.StreamError -> - Logger.error("Stream error: #{e.message}") + Logger.debug("Stream error: #{e.message}") {:continue, %{original_state | connection: connection, buffer: rest}} end @@ -236,7 +221,7 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do {:continue, %{original_state | connection: connection, buffer: buffer}} {{:error, error_code, reason}, _rest} -> - Logger.error("Frame deserialization error: #{reason} (code: #{error_code})") + Logger.debug("Frame deserialization error: #{reason} (code: #{error_code})") {:close, original_state} nil -> @@ -245,8 +230,6 @@ 
defmodule GRPC.Server.Adapters.ThousandIsland.Handler do end end - # API functions called by the adapter - def read_full_body(pid) do GenServer.call(pid, :read_full_body) end diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/http2.back b/grpc_server/lib/grpc/server/adapters/thousand_island/http2.back deleted file mode 100644 index 5decd6535..000000000 --- a/grpc_server/lib/grpc/server/adapters/thousand_island/http2.back +++ /dev/null @@ -1,275 +0,0 @@ -defmodule GRPC.Server.Adapters.ThousandIsland.HTTP2 do - @moduledoc """ - HTTP/2 frame handling for gRPC over ThousandIsland. - - This module provides HTTP/2 protocol support using the chatterbox library, - handling frame parsing, stream management, and HPACK compression. - """ - - require Logger - - @type stream_id :: non_neg_integer() - @type headers :: [{binary(), binary()}] - @type frame :: term() - - @doc """ - Initialize HTTP/2 connection state. - """ - def init_connection do - # Initialize HTTP/2 connection settings - %{ - streams: %{}, - next_stream_id: 1, - settings: default_settings(), - header_table: :hpack.new_context(), - last_stream_id: 0 - } - end - - @doc """ - Process incoming HTTP/2 frame data. - """ - def handle_data(data, state) do - case parse_frame(data, state) do - {:ok, frames, rest, new_state} -> - {:ok, frames, rest, new_state} - - {:error, reason} -> - {:error, reason} - - {:more, new_state} -> - {:more, new_state} - end - end - - @doc """ - Parse HTTP/2 frames from binary data. 
- """ - defp parse_frame(<<>>, state), do: {:more, state} - - defp parse_frame(data, state) when byte_size(data) < 9 do - # Need at least frame header (9 bytes) - {:more, Map.put(state, :buffer, data)} - end - - defp parse_frame(data, state) do - case :h2_frame.read(data) do - {:ok, frame, rest} -> - {frames, new_state} = process_frame(frame, state) - - if byte_size(rest) > 0 do - case parse_frame(rest, new_state) do - {:ok, more_frames, final_rest, final_state} -> - {:ok, frames ++ more_frames, final_rest, final_state} - - other -> - other - end - else - {:ok, frames, rest, new_state} - end - - {:error, reason} -> - {:error, reason} - - :incomplete -> - {:more, Map.put(state, :buffer, data)} - end - end - - @doc """ - Process a single HTTP/2 frame. - """ - defp process_frame({:headers, stream_id, headers, flags}, state) do - Logger.debug("Received HEADERS frame for stream #{stream_id}") - - # Decode HPACK-compressed headers - {:ok, decoded_headers, new_context} = - :hpack.decode(headers, state.header_table) - - new_state = %{ - state - | header_table: new_context, - last_stream_id: max(state.last_stream_id, stream_id) - } - - # Check if END_HEADERS flag is set - end_headers? = :h2_frame.is_flag_set(:end_headers, flags) - end_stream? = :h2_frame.is_flag_set(:end_stream, flags) - - stream_info = %{ - stream_id: stream_id, - headers: decoded_headers, - end_headers: end_headers?, - end_stream: end_stream?, - state: if(end_stream?, do: :half_closed_remote, else: :open) - } - - new_state = put_in(new_state, [:streams, stream_id], stream_info) - - {[{:headers, stream_id, decoded_headers, flags}], new_state} - end - - defp process_frame({:data, stream_id, data, flags}, state) do - Logger.debug("Received DATA frame for stream #{stream_id}: #{byte_size(data)} bytes") - - end_stream? = :h2_frame.is_flag_set(:end_stream, flags) - - if end_stream? 
do - # Update stream state - new_state = - update_in(state, [:streams, stream_id, :state], fn _ -> - :half_closed_remote - end) - - {[{:data, stream_id, data, true}], new_state} - else - {[{:data, stream_id, data, false}], state} - end - end - - defp process_frame({:settings, _stream_id, settings}, state) do - Logger.debug("Received SETTINGS frame: #{inspect(settings)}") - - # Merge received settings with our settings - new_settings = Map.merge(state.settings, Enum.into(settings, %{})) - new_state = %{state | settings: new_settings} - - # Must send SETTINGS ACK - ack_frame = encode_settings_ack() - - {[{:settings, settings}, {:send, ack_frame}], new_state} - end - - defp process_frame({:window_update, stream_id, increment}, state) do - Logger.debug("Received WINDOW_UPDATE for stream #{stream_id}: +#{increment}") - - # Update flow control window - # TODO: Implement proper flow control - {[{:window_update, stream_id, increment}], state} - end - - defp process_frame({:rst_stream, stream_id, error_code}, state) do - Logger.debug("Received RST_STREAM for stream #{stream_id}: #{error_code}") - - # Remove stream from state - new_state = - update_in(state, [:streams], fn streams -> - Map.delete(streams, stream_id) - end) - - {[{:rst_stream, stream_id, error_code}], new_state} - end - - defp process_frame({:ping, _stream_id, opaque_data}, state) do - Logger.debug("Received PING frame") - - # Must respond with PING ACK - pong_frame = encode_ping_ack(opaque_data) - - {[{:ping, opaque_data}, {:send, pong_frame}], state} - end - - defp process_frame({:goaway, _stream_id, last_stream_id, error_code, debug_data}, state) do - Logger.info("Received GOAWAY: last_stream=#{last_stream_id}, error=#{error_code}") - - {[{:goaway, last_stream_id, error_code, debug_data}], state} - end - - defp process_frame(frame, state) do - Logger.warning("Unhandled frame type: #{inspect(frame)}") - {[], state} - end - - @doc """ - Encode headers frame. 
- """ - def encode_headers(stream_id, headers, end_stream?, context) do - {:ok, encoded_headers, new_context} = :hpack.encode(headers, context) - - flags = - [:end_headers] ++ - if end_stream?, do: [:end_stream], else: [] - - frame = :h2_frame.headers(stream_id, flags, encoded_headers) - {:ok, :h2_frame.to_binary(frame), new_context} - end - - @doc """ - Encode data frame. - """ - def encode_data(stream_id, data, end_stream?) do - flags = if end_stream?, do: [:end_stream], else: [] - frame = :h2_frame.data(stream_id, flags, data) - {:ok, :h2_frame.to_binary(frame)} - end - - @doc """ - Encode settings frame. - """ - def encode_settings(settings) do - frame = :h2_frame.settings(0, [], settings) - :h2_frame.to_binary(frame) - end - - @doc """ - Encode settings ACK frame. - """ - def encode_settings_ack do - frame = :h2_frame.settings(0, [:ack], []) - :h2_frame.to_binary(frame) - end - - @doc """ - Encode ping ACK frame. - """ - def encode_ping_ack(opaque_data) do - frame = :h2_frame.ping(0, [:ack], opaque_data) - :h2_frame.to_binary(frame) - end - - @doc """ - Encode RST_STREAM frame. - """ - def encode_rst_stream(stream_id, error_code) do - frame = :h2_frame.rst_stream(stream_id, error_code) - :h2_frame.to_binary(frame) - end - - @doc """ - Encode GOAWAY frame. - """ - def encode_goaway(last_stream_id, error_code, debug_data \\ <<>>) do - frame = :h2_frame.goaway(0, last_stream_id, error_code, debug_data) - :h2_frame.to_binary(frame) - end - - @doc """ - Default HTTP/2 settings. - """ - def default_settings do - %{ - header_table_size: 4096, - enable_push: 0, - max_concurrent_streams: 100, - initial_window_size: 65535, - max_frame_size: 16384, - max_header_list_size: :infinity - } - end - - @doc """ - Validate HTTP/2 connection preface. 
- """ - def validate_preface(<<"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n", rest::binary>>) do - {:ok, rest} - end - - def validate_preface(data) when byte_size(data) < 24 do - {:more, data} - end - - def validate_preface(_data) do - {:error, :invalid_preface} - end -end diff --git a/grpc_server/lib/grpc/server/http2/connection.ex b/grpc_server/lib/grpc/server/http2/connection.ex index 0bb9360eb..f4c618b1d 100644 --- a/grpc_server/lib/grpc/server/http2/connection.ex +++ b/grpc_server/lib/grpc/server/http2/connection.ex @@ -21,9 +21,7 @@ defmodule GRPC.Server.HTTP2.Connection do send_window_size: 65_535, recv_window_size: 65_535, streams: %{}, - # Map of stream_id => stream_state next_stream_id: 2, - # Client streams are odd, server push is even endpoint: nil, servers: %{}, socket: nil, @@ -84,7 +82,7 @@ defmodule GRPC.Server.HTTP2.Connection do def send_headers(socket, stream_id, headers, connection) do # Check if stream still exists (may have been closed by RST_STREAM) unless Map.has_key?(connection.streams, stream_id) do - Logger.warning( + Logger.debug( "[send_headers] SKIPPED - stream=#{stream_id} no longer exists (likely cancelled by client)" ) @@ -115,7 +113,7 @@ defmodule GRPC.Server.HTTP2.Connection do def send_data(socket, stream_id, data, end_stream, connection) do # Check if stream still exists (may have been closed by RST_STREAM) unless Map.has_key?(connection.streams, stream_id) do - Logger.warning( + Logger.debug( "[send_data] SKIPPED - stream=#{stream_id} no longer exists (likely cancelled by client)" ) @@ -138,7 +136,7 @@ defmodule GRPC.Server.HTTP2.Connection do def send_trailers(socket, stream_id, trailers, connection) do # Check if stream still exists (may have been closed by RST_STREAM) unless Map.has_key?(connection.streams, stream_id) do - Logger.warning( + Logger.debug( "[send_trailers] SKIPPED - stream=#{stream_id} no longer exists (likely cancelled by client)" ) @@ -327,7 +325,7 @@ defmodule GRPC.Server.HTTP2.Connection do case 
Map.get(connection.streams, stream_id) do nil -> - Logger.warning( + Logger.debug( "[IGNORE_DATA] stream=#{stream_id} not found, size=#{byte_size(frame.data)} (stream already closed)" ) @@ -478,7 +476,10 @@ defmodule GRPC.Server.HTTP2.Connection do # Check if this is bidirectional streaming # For bidi, we need to process messages as they arrive (not wait for END_STREAM) is_bidi = - GRPC.Server.HTTP2.Dispatcher.is_bidi_streaming?(stream_state.path, connection.servers) + GRPC.Server.HTTP2.Dispatcher.is_bidi_streaming?( + stream_state.path, + connection.servers + ) stream_state = %{stream_state | is_bidi_streaming: is_bidi} @@ -509,7 +510,7 @@ defmodule GRPC.Server.HTTP2.Connection do %{connection | recv_hpack_state: new_hpack_state} {:error, reason} -> - Logger.warning( + Logger.debug( "[handle_headers_frame] Failed to decode trailers for stream #{frame.stream_id}: #{inspect(reason)}" ) @@ -713,7 +714,7 @@ defmodule GRPC.Server.HTTP2.Connection do | streams: Map.delete(updated_connection.streams, stream_state.stream_id) } else - Logger.warning( + Logger.debug( "[process_grpc_request] Stream #{stream_state.stream_id} already removed, skipping error send" ) @@ -789,7 +790,7 @@ defmodule GRPC.Server.HTTP2.Connection do | streams: Map.delete(updated_connection.streams, stream_state.stream_id) } else - Logger.warning( + Logger.debug( "[process_grpc_request] Stream #{stream_state.stream_id} already removed in rescue, skipping error send" ) @@ -801,7 +802,7 @@ defmodule GRPC.Server.HTTP2.Connection do defp send_grpc_trailers(socket, stream_id, trailers, connection) do # Check if stream still exists (may have been closed by RST_STREAM) unless Map.has_key?(connection.streams, stream_id) do - Logger.warning( + Logger.debug( "[send_grpc_trailers] SKIPPED - stream=#{stream_id} no longer exists (likely cancelled by client)" ) @@ -811,14 +812,11 @@ defmodule GRPC.Server.HTTP2.Connection do "[send_grpc_trailers] Sending trailers for stream #{stream_id}: #{inspect(trailers)}" ) - # 
Convert map to list of tuples for HPAX trailer_list = Map.to_list(trailers) - # Encode trailers using HPACK {trailer_block, new_hpack} = HPAX.encode(:no_store, trailer_list, connection.send_hpack_state) - # Send HEADERS frame with END_STREAM flag headers_frame = %Frame.Headers{ stream_id: stream_id, fragment: trailer_block, @@ -828,7 +826,6 @@ defmodule GRPC.Server.HTTP2.Connection do send_frame(headers_frame, socket, connection) - # Return updated connection with new HPACK state %{connection | send_hpack_state: new_hpack} end end @@ -865,7 +862,6 @@ defmodule GRPC.Server.HTTP2.Connection do {[], connection.send_hpack_state} end - # Encode data frame data_frame = %Frame.Data{ stream_id: stream_id, data: response_data, @@ -874,7 +870,6 @@ defmodule GRPC.Server.HTTP2.Connection do data_iodata = Frame.serialize(data_frame, max_frame_size) - # Encode trailers frame trailer_list = Map.to_list(trailers) {trailer_block, final_hpack} = HPAX.encode(:no_store, trailer_list, hpack_after_headers) @@ -895,13 +890,12 @@ defmodule GRPC.Server.HTTP2.Connection do ThousandIsland.Socket.send(socket, combined_iodata) end - # Return updated connection with new HPACK state %{connection | send_hpack_state: final_hpack} end defp extract_messages_from_buffer(stream_state) do # Extract 5-byte length-prefixed messages from data_buffer - Logger.info( + Logger.debug( "[Connection] Extracting messages from data_buffer (#{byte_size(stream_state.data_buffer)} bytes)" ) @@ -909,7 +903,7 @@ defmodule GRPC.Server.HTTP2.Connection do # Reverse since we build list backwards for performance extracted_count = length(messages) - Logger.info( + Logger.debug( "[Connection] Extracted #{extracted_count} messages, #{byte_size(remaining)} bytes remaining" ) @@ -963,17 +957,17 @@ defmodule GRPC.Server.HTTP2.Connection do # Se o stream não existe OU já enviamos erro, não fazer nada if !stream_state do - Logger.warning("[SEND_GRPC_ERROR] stream=#{stream_id} SKIPPED - stream not found") + 
Logger.debug("[SEND_GRPC_ERROR] stream=#{stream_id} SKIPPED - stream not found") connection else if stream_state.error_sent do - Logger.warning("[SEND_GRPC_ERROR] stream=#{stream_id} SKIPPED - error already sent") + Logger.debug("[SEND_GRPC_ERROR] stream=#{stream_id} SKIPPED - error already sent") connection else headers_sent = stream_state.headers_sent end_stream_received = stream_state.end_stream_received - Logger.warning( + Logger.debug( "[SEND_GRPC_ERROR] stream=#{stream_id}, status=#{status_code}, message=#{message}, headers_sent=#{headers_sent}, end_stream_received=#{end_stream_received}" ) @@ -981,19 +975,18 @@ defmodule GRPC.Server.HTTP2.Connection do # o stream está "closed" e NÃO podemos enviar mais frames (exceto PRIORITY) # Nesse caso, apenas remover o stream e não enviar nada if headers_sent && end_stream_received do - Logger.warning( + Logger.debug( "[SEND_GRPC_ERROR] stream=#{stream_id} SKIPPED - stream is fully closed (both sides sent END_STREAM)" ) %{connection | streams: Map.delete(connection.streams, stream_id)} else # Check if headers were already sent for this stream - # ALWAYS send headers first if not sent yet, even if stream received END_STREAM # HTTP/2 requires :status pseudo-header before trailers updated_connection = if !headers_sent do - Logger.warning( + Logger.debug( "[SEND_GRPC_ERROR] Sending HTTP/2 headers first for stream=#{stream_id}" ) @@ -1011,13 +1004,13 @@ defmodule GRPC.Server.HTTP2.Connection do "grpc-message" => message } - Logger.warning( + Logger.debug( "[SEND_GRPC_ERROR] Sending TRAILERS with END_STREAM for stream=#{stream_id}" ) send_grpc_trailers(socket, stream_id, trailers, updated_conn) else - Logger.warning( + Logger.debug( "[SEND_GRPC_ERROR] SKIPPED trailers - stream=#{stream_id} was closed after sending headers" ) @@ -1030,7 +1023,7 @@ defmodule GRPC.Server.HTTP2.Connection do "grpc-message" => message } - Logger.warning( + Logger.debug( "[SEND_GRPC_ERROR] Sending TRAILERS with END_STREAM for stream=#{stream_id}" ) 
@@ -1045,7 +1038,7 @@ defmodule GRPC.Server.HTTP2.Connection do if s, do: %{s | error_sent: true}, else: nil end) else - Logger.warning( + Logger.debug( "[SEND_GRPC_ERROR] SKIPPED marking error_sent - stream=#{stream_id} was already closed" ) diff --git a/grpc_server/lib/grpc/server/http2/dispatcher.ex b/grpc_server/lib/grpc/server/http2/dispatcher.ex index 6cfb9b9dc..0746bfbb5 100644 --- a/grpc_server/lib/grpc/server/http2/dispatcher.ex +++ b/grpc_server/lib/grpc/server/http2/dispatcher.ex @@ -42,7 +42,7 @@ defmodule GRPC.Server.HTTP2.Dispatcher do @spec dispatch(StreamState.t(), map(), atom(), GRPC.Server.HTTP2.Connection.t()) :: {:ok, list(), binary(), map()} | {:error, GRPC.RPCError.t()} def dispatch(%StreamState{} = stream_state, servers, endpoint, connection) do - Logger.info( + Logger.debug( "[dispatch] path=#{stream_state.path}, messages=#{length(stream_state.message_buffer)}" ) @@ -50,7 +50,7 @@ defmodule GRPC.Server.HTTP2.Dispatcher do if StreamState.deadline_exceeded?(stream_state) do now = System.monotonic_time(:microsecond) - Logger.warning( + Logger.debug( "[dispatch] Deadline exceeded for path=#{stream_state.path}, deadline=#{stream_state.deadline}, now=#{now}, diff=#{now - (stream_state.deadline || now)}us" ) @@ -696,7 +696,7 @@ defmodule GRPC.Server.HTTP2.Dispatcher do if res_stream? 
do # This path shouldn't be reached anymore (streaming handled in call_server_streaming) - Logger.warning("Unexpected: encode_responses called with streaming response") + Logger.debug("Unexpected: encode_responses called with streaming response") {:ok, :streaming_done} else # Unary - encode single response with custom headers/trailers from stream_state From 1e4939f9c50dd592f22333f922f3a6062f016f9a Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Thu, 11 Dec 2025 01:19:01 -0300 Subject: [PATCH 19/47] ci: fix format check by using mix setup instead of mix deps.get --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3c64f97c8..6fcbddf91 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,7 +25,7 @@ jobs: path: deps key: v1-${{ matrix.os }}-${{ matrix.otp }}-${{ matrix.elixir }}-mix-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }} - name: Install Dependencies - run: mix deps.get 1>/dev/null + run: mix setup 1>/dev/null - name: Check format run: mix format --check-formatted tests: From 8361b0da1c7f8abf0529b7dacdd144f12a1f1bcc Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Thu, 11 Dec 2025 01:24:19 -0300 Subject: [PATCH 20/47] chore: some cleanup --- benchmark/lib/mix/tasks/benchmark.profile.ex | 105 ------------------- benchmark/scripts/profile_server.exs | 70 ------------- benchmark/scripts/quick_bench.sh | 20 ---- benchmark/scripts/run_optimized.sh | 15 --- interop/script/quick_test.exs | 52 --------- interop/script/test_custom_metadata.exs | 34 ------ interop/script/test_one.exs | 43 -------- 7 files changed, 339 deletions(-) delete mode 100644 benchmark/lib/mix/tasks/benchmark.profile.ex delete mode 100644 benchmark/scripts/profile_server.exs delete mode 100755 benchmark/scripts/quick_bench.sh delete mode 100755 benchmark/scripts/run_optimized.sh delete mode 100644 interop/script/quick_test.exs delete mode 100644 
interop/script/test_custom_metadata.exs delete mode 100644 interop/script/test_one.exs diff --git a/benchmark/lib/mix/tasks/benchmark.profile.ex b/benchmark/lib/mix/tasks/benchmark.profile.ex deleted file mode 100644 index bec8e6ea4..000000000 --- a/benchmark/lib/mix/tasks/benchmark.profile.ex +++ /dev/null @@ -1,105 +0,0 @@ -defmodule Mix.Tasks.Benchmark.Profile do - use Mix.Task - require Logger - - @shortdoc "Profile the gRPC benchmark to find bottlenecks" - - @moduledoc """ - Profiles the gRPC benchmark using Erlang's profiling tools. - - Usage: - mix benchmark.profile --adapter=thousand_island --requests=500 - mix benchmark.profile --adapter=cowboy --requests=500 - """ - - def run(args) do - # Disable logging to reduce noise - Logger.configure(level: :error) - - Mix.Task.run("app.start") - - {opts, _, _} = OptionParser.parse(args, - switches: [adapter: :string, requests: :integer, port: :integer], - aliases: [a: :adapter, r: :requests] - ) - - adapter_name = opts[:adapter] || "thousand_island" - num_requests = opts[:requests] || 500 - port = opts[:port] || 10000 - - adapter = case String.downcase(adapter_name) do - "thousand_island" -> GRPC.Server.Adapters.ThousandIsland - "cowboy" -> GRPC.Server.Adapters.Cowboy - _ -> GRPC.Server.Adapters.ThousandIsland - end - - # Start server - IO.puts("Starting #{adapter_name} server on port #{port}...") - - server = %Grpc.Testing.ServerConfig{ - async_server_threads: 1, - port: port - } - - _server = Benchmark.ServerManager.start_server(server, adapter: adapter) - Process.sleep(500) - - # Connect client - {:ok, ch} = GRPC.Stub.connect("localhost:#{port}") - - # Prepare request - payload_type = Grpc.Testing.PayloadType.value(:COMPRESSABLE) - req = %Grpc.Testing.SimpleRequest{ - response_type: payload_type, - response_size: 0, - payload: %Grpc.Testing.Payload{type: payload_type, body: <<>>} - } - - # Warm up - IO.puts("Warming up with 100 requests...") - Enum.each(1..100, fn _ -> - 
Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) - end) - - IO.puts("\n=== Profiling with :eprof (#{num_requests} requests) ===") - IO.puts("This will show time spent in each function...\n") - - # Get the handler process pid - we need to profile the server side - Process.sleep(100) - - # Profile using :eprof - it measures time, not just call counts - spawn(fn -> - Process.sleep(50) - - # Start profiling all processes - :eprof.start() - :eprof.start_profiling(:processes) - - # Let it run for the benchmark - Process.sleep(div(num_requests * 2, 1)) - - :eprof.stop_profiling() - IO.puts("\n\n=== EPROF Analysis ===") - :eprof.analyze([:total]) - :eprof.stop() - end) - - # Run benchmark - start_time = System.monotonic_time(:millisecond) - Enum.each(1..num_requests, fn _ -> - Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) - end) - end_time = System.monotonic_time(:millisecond) - - elapsed = end_time - start_time - req_per_sec = num_requests / (elapsed / 1000) - - IO.puts("\nPerformance: #{Float.round(req_per_sec, 2)} req/s (#{num_requests} requests in #{elapsed}ms)") - - # Wait for profiling to complete - Process.sleep(2000) - - IO.puts("\n=== Done! 
===") - IO.puts("Look for GRPC.Server.* modules above to see where time is spent.") - end -end diff --git a/benchmark/scripts/profile_server.exs b/benchmark/scripts/profile_server.exs deleted file mode 100644 index 054e9a135..000000000 --- a/benchmark/scripts/profile_server.exs +++ /dev/null @@ -1,70 +0,0 @@ -# Profile gRPC server to identify performance bottlenecks -# Run with: mix run scripts/profile_server.exs - -require Logger - -Logger.configure(level: :warning) - -# Start server -{:ok, _pid, port} = GRPC.Server.start_endpoint(Benchmark.ServerManager, 0) - -IO.puts("Server started on port #{port}") -IO.puts("Waiting 2 seconds for server to be ready...") -Process.sleep(2000) - -# Prepare client connection -opts = [adapter: GRPC.Client.Adapters.Gun] -channel = GRPC.Client.Stub.connect("127.0.0.1:#{port}", opts) - -# Prepare request -request = Grpc.Testing.SimpleRequest.new( - response_type: :COMPRESSABLE, - response_size: 314_159, - payload: Grpc.Testing.Payload.new( - type: :COMPRESSABLE, - body: :binary.copy(<<0>>, 271_828) - ) -) - -IO.puts("\n=== Starting profiling with :fprof ===") -IO.puts("Warmup: sending 100 requests...") - -# Warmup -for _ <- 1..100 do - {:ok, _response} = channel - |> Grpc.Testing.BenchmarkService.Stub.unary_call(request) -end - -IO.puts("Warmup complete. Starting profiling...") -Process.sleep(1000) - -# Start profiling -:fprof.trace([:start, {:procs, Process.list()}]) - -# Run profiled requests -for _ <- 1..1000 do - {:ok, _response} = channel - |> Grpc.Testing.BenchmarkService.Stub.unary_call(request) -end - -# Stop profiling -:fprof.trace(:stop) - -IO.puts("Profiling complete. 
Analyzing results...") - -# Analyze -:fprof.profile() -:fprof.analyse([ - totals: true, - sort: :acc, - dest: ~c"benchmark/profile_results.txt" -]) - -IO.puts("\nResults saved to benchmark/profile_results.txt") -IO.puts("\nTop functions by accumulated time:") -:fprof.analyse([totals: true, sort: :acc, dest: []]) - -# Cleanup -GRPC.Server.stop_endpoint(Benchmark.ServerManager) - -IO.puts("\n=== Profiling complete ===") diff --git a/benchmark/scripts/quick_bench.sh b/benchmark/scripts/quick_bench.sh deleted file mode 100755 index ab16146cf..000000000 --- a/benchmark/scripts/quick_bench.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -set -e - -echo "========================================" -echo "Quick Performance Benchmark" -echo "========================================" -echo "" - -cd "$(dirname "$0")/.." - -echo "Running optimized benchmark..." -echo "" - -MIX_ENV=prod mix benchmark.test --codec=proto --requests=30000 2>&1 | tee /tmp/bench_output.txt - -echo "" -echo "========================================" -echo "Results Summary:" -echo "========================================" -grep -E "req/s|Requests|requests/sec" /tmp/bench_output.txt || echo "Check full output above" diff --git a/benchmark/scripts/run_optimized.sh b/benchmark/scripts/run_optimized.sh deleted file mode 100755 index 42032415b..000000000 --- a/benchmark/scripts/run_optimized.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash -# Run benchmark with optimized Erlang VM flags - -set -e - -# Optimize Erlang VM for performance -export ERL_FLAGS="+sbwt very_long +swt very_low +sub true +pc unicode" -export ELIXIR_ERL_OPTIONS="+fnu" - -# Compiler optimizations -export ERL_COMPILER_OPTIONS="[inline,{inline_size,128}]" - -# Run benchmark -cd "$(dirname "$0")/.." 
-MIX_ENV=prod mix benchmark.test --codec=proto --requests=30000 diff --git a/interop/script/quick_test.exs b/interop/script/quick_test.exs deleted file mode 100644 index 13dc6a11c..000000000 --- a/interop/script/quick_test.exs +++ /dev/null @@ -1,52 +0,0 @@ -Logger.configure(level: :warning) - -alias GRPC.Client.Adapters.Gun -alias Interop.Client - -{:ok, _pid, port} = GRPC.Server.start_endpoint(Interop.Endpoint, 0, adapter: GRPC.Server.Adapters.ThousandIsland) -IO.puts("Server started on port #{port}") - -opts = [adapter: Gun] -ch = Client.connect("127.0.0.1:#{port}", opts) - -tests = [ - {"empty_unary", fn -> Client.empty_unary!(ch) end}, - {"cacheable_unary", fn -> Client.cacheable_unary!(ch) end}, - {"large_unary", fn -> Client.large_unary!(ch) end}, - {"large_unary2", fn -> Client.large_unary2!(ch) end}, - {"client_compressed_unary", fn -> Client.client_compressed_unary!(ch) end}, - {"server_compressed_unary", fn -> Client.server_compressed_unary!(ch) end}, - {"client_streaming", fn -> Client.client_streaming!(ch) end}, - {"client_compressed_streaming", fn -> Client.client_compressed_streaming!(ch) end}, - {"server_streaming", fn -> Client.server_streaming!(ch) end}, - {"server_compressed_streaming", fn -> Client.server_compressed_streaming!(ch) end}, - {"ping_pong", fn -> Client.ping_pong!(ch) end}, - {"empty_stream", fn -> Client.empty_stream!(ch) end}, - {"custom_metadata", fn -> Client.custom_metadata!(ch) end}, - {"status_code_and_message", fn -> Client.status_code_and_message!(ch) end}, - {"unimplemented_service", fn -> Client.unimplemented_service!(ch) end}, - {"cancel_after_begin", fn -> Client.cancel_after_begin!(ch) end}, - {"cancel_after_first_response", fn -> Client.cancel_after_first_response!(ch) end}, - # Note: timeout_on_sleeping_server skipped - requires client adapter fix for DEADLINE_EXCEEDED status -] - -results = %{passed: 0, failed: 0} - -results = for {name, test_fn} <- tests, reduce: results do - acc -> - try do - test_fn.() - 
IO.puts("✓ #{name}") - %{acc | passed: acc.passed + 1} - rescue - e -> - IO.puts("✗ #{name}: #{Exception.message(e)}") - %{acc | failed: acc.failed + 1} - end -end - -IO.puts("\n========================================") -IO.puts("Results: #{results.passed}/#{results.passed + results.failed} tests passed") -IO.puts("========================================") - -GRPC.Server.stop_endpoint(Interop.Endpoint, adapter: GRPC.Server.Adapters.ThousandIsland) diff --git a/interop/script/test_custom_metadata.exs b/interop/script/test_custom_metadata.exs deleted file mode 100644 index cdb820e5c..000000000 --- a/interop/script/test_custom_metadata.exs +++ /dev/null @@ -1,34 +0,0 @@ -# Test custom_metadata with debug output -require Logger -Logger.configure(level: :debug) - -{:ok, _pid, test_port} = GRPC.Server.start_endpoint(Interop.Endpoint, 0, adapter: GRPC.Server.Adapters.ThousandIsland) -IO.puts("Server started on port #{test_port}") - -opts = [adapter: GRPC.Client.Adapters.Gun] -ch = Interop.Client.connect("127.0.0.1:#{test_port}", opts) - -payload = %Grpc.Testing.Payload{body: String.duplicate(<<0>>, 271_828)} -req = %Grpc.Testing.SimpleRequest{response_size: 314_159, payload: payload} -headers = %{"x-grpc-test-echo-initial" => "test_initial_metadata_value"} -trailers = %{"x-grpc-test-echo-trailing-bin" => 0xABABAB} -metadata = Map.merge(headers, trailers) - -IO.puts("\nSending request with metadata: #{inspect(metadata)}") - -result = Grpc.Testing.TestService.Stub.unary_call(ch, req, metadata: metadata, return_headers: true) - -IO.puts("\nReceived result:") -IO.inspect(result, label: "Result", pretty: true) - -case result do - {:ok, _reply, %{headers: recv_headers, trailers: recv_trailers}} -> - IO.puts("\nReceived headers:") - IO.inspect(recv_headers, label: "Headers", pretty: true) - IO.puts("\nReceived trailers:") - IO.inspect(recv_trailers, label: "Trailers", pretty: true) - _ -> - IO.puts("Unexpected result format") -end - -GRPC.Server.stop_endpoint(Interop.Endpoint, 
adapter: GRPC.Server.Adapters.ThousandIsland) diff --git a/interop/script/test_one.exs b/interop/script/test_one.exs deleted file mode 100644 index 6c4a30bdb..000000000 --- a/interop/script/test_one.exs +++ /dev/null @@ -1,43 +0,0 @@ -{options, _, _} = - OptionParser.parse(System.argv(), - strict: [test: :string, port: :integer] - ) - -test_name = Keyword.get(options, :test) || "empty_unary" -port = Keyword.get(options, :port) || 0 - -require Logger -Logger.configure(level: :info) - -alias Interop.Client - -{:ok, _pid, test_port} = GRPC.Server.start_endpoint(Interop.Endpoint, port, adapter: GRPC.Server.Adapters.ThousandIsland) -IO.puts("Server started on port #{test_port}") - -opts = [adapter: GRPC.Client.Adapters.Gun] -ch = Client.connect("127.0.0.1:#{test_port}", opts) - -try do - case test_name do - "empty_unary" -> Client.empty_unary!(ch) - "large_unary" -> Client.large_unary!(ch) - "client_streaming" -> Client.client_streaming!(ch) - "server_streaming" -> Client.server_streaming!(ch) - "ping_pong" -> Client.ping_pong!(ch) - "empty_stream" -> Client.empty_stream!(ch) - "custom_metadata" -> Client.custom_metadata!(ch) - "status_code_and_message" -> Client.status_code_and_message!(ch) - "unimplemented_service" -> Client.unimplemented_service!(ch) - "cancel_after_begin" -> Client.cancel_after_begin!(ch) - "timeout_on_sleeping_server" -> Client.timeout_on_sleeping_server!(ch) - _ -> IO.puts("Unknown test: #{test_name}") - end - IO.puts("✓ #{test_name} PASSED") -catch - kind, error -> - IO.puts("✗ #{test_name} FAILED") - IO.puts("Error: #{inspect(kind)} #{inspect(error)}") - IO.puts(Exception.format_stacktrace(__STACKTRACE__)) -end - -GRPC.Server.stop_endpoint(Interop.Endpoint, adapter: GRPC.Server.Adapters.ThousandIsland) From e17c98c64f1a25eb30c5f8a2c463eb84ac8931d0 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Fri, 12 Dec 2025 10:47:18 -0300 Subject: [PATCH 21/47] Update grpc_client/lib/grpc/client/application.ex Co-authored-by: Paulo Valente 
<16843419+polvalente@users.noreply.github.com> --- grpc_client/lib/grpc/client/application.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grpc_client/lib/grpc/client/application.ex b/grpc_client/lib/grpc/client/application.ex index 24c2d306d..6a88fef01 100644 --- a/grpc_client/lib/grpc/client/application.ex +++ b/grpc_client/lib/grpc/client/application.ex @@ -7,7 +7,7 @@ defmodule GRPC.Client.Application do {GRPC.Client.Supervisor, [name: GRPC.Client.Supervisor]} ] - opts = [strategy: :one_for_one, name: Grpc.Supervisor] + opts = [strategy: :one_for_one, name: GRPC.Supervisor] Supervisor.start_link(children, opts) end end From 3704a97cea64cfd66329875955eaf352ce158da9 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Fri, 12 Dec 2025 10:47:48 -0300 Subject: [PATCH 22/47] Update .gitignore Co-authored-by: Paulo Valente <16843419+polvalente@users.noreply.github.com> --- .gitignore | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.gitignore b/.gitignore index 516591502..8a1995dd4 100644 --- a/.gitignore +++ b/.gitignore @@ -24,9 +24,5 @@ erl_crash.dump .elixir_ls -.elixir_tools -grpc_client/.elixir-tools/ -grpc_core/.elixir-tools/ -grpc_server/.elixir-tools/ grpc-*.tar From f4d410e3f0243bd2d07c412c544ede210a584b0e Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Fri, 12 Dec 2025 10:51:29 -0300 Subject: [PATCH 23/47] Update grpc_client/lib/grpc/client/application.ex Co-authored-by: Paulo Valente <16843419+polvalente@users.noreply.github.com> --- grpc_client/lib/grpc/client/application.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grpc_client/lib/grpc/client/application.ex b/grpc_client/lib/grpc/client/application.ex index 6a88fef01..16d4a230f 100644 --- a/grpc_client/lib/grpc/client/application.ex +++ b/grpc_client/lib/grpc/client/application.ex @@ -4,7 +4,7 @@ defmodule GRPC.Client.Application do def start(_type, _args) do children = [ - {GRPC.Client.Supervisor, [name: GRPC.Client.Supervisor]} + {DynamicSupervisor, 
[name: GRPC.Client.Supervisor]} ] opts = [strategy: :one_for_one, name: GRPC.Supervisor] From 5daf08a6dc428c16781fe8ef0f355223bc755b88 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Fri, 12 Dec 2025 11:02:28 -0300 Subject: [PATCH 24/47] remove bench application.ex --- benchmark/lib/benchmark/application.ex | 12 ------------ benchmark/mix.exs | 1 - 2 files changed, 13 deletions(-) delete mode 100644 benchmark/lib/benchmark/application.ex diff --git a/benchmark/lib/benchmark/application.ex b/benchmark/lib/benchmark/application.ex deleted file mode 100644 index f665f02c9..000000000 --- a/benchmark/lib/benchmark/application.ex +++ /dev/null @@ -1,12 +0,0 @@ -defmodule Benchmark.Application do - @moduledoc false - use Application - - @impl true - def start(_type, _args) do - children = [] - - opts = [strategy: :one_for_one, name: Benchmark.Supervisor] - Supervisor.start_link(children, opts) - end -end diff --git a/benchmark/mix.exs b/benchmark/mix.exs index d65213602..3e0ac0188 100644 --- a/benchmark/mix.exs +++ b/benchmark/mix.exs @@ -15,7 +15,6 @@ defmodule Benchmark.MixProject do def application do [ extra_applications: [:logger], - mod: {Benchmark.Application, []} ] end From 64f90ce603ee7b5d083bb6ba7fd58c9510e3409f Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Fri, 12 Dec 2025 11:13:56 -0300 Subject: [PATCH 25/47] fix: remove client supervisor --- grpc_client/lib/grpc/client/supervisor.ex | 63 ----------------------- 1 file changed, 63 deletions(-) delete mode 100644 grpc_client/lib/grpc/client/supervisor.ex diff --git a/grpc_client/lib/grpc/client/supervisor.ex b/grpc_client/lib/grpc/client/supervisor.ex deleted file mode 100644 index ba2d53edd..000000000 --- a/grpc_client/lib/grpc/client/supervisor.ex +++ /dev/null @@ -1,63 +0,0 @@ -defmodule GRPC.Client.Supervisor do - @moduledoc """ - A DynamicSupervisor responsible for managing gRPC client connections (`GRPC.Client.Connection`). 
- - This supervisor allows you to dynamically start and stop gRPC client connections at runtime. - Each connection is run as a separate `GenServer` under this supervisor, which ensures proper - supervision and isolation between connections. - - ## Starting the Supervisor - - Typically, you start this supervisor as part of your application's supervision tree: - - children = [ - GRPC.Client.Supervisor - ] - - opts = [strategy: :one_for_one, name: MyApp.Supervisor] - Supervisor.start_link(children, opts) - - You can also start it manually in scripts or test environments: - - {:ok, _pid} = GRPC.Client.Supervisor.start_link([]) - - ## Supervision Strategy - - This supervisor uses `:one_for_one` strategy: - - * If a connection process crashes, only that process is restarted. - * Other running connections remain unaffected. - - ## Establishing a gRPC Connection - - To create a new gRPC connection, you typically use the `GRPC.Stub.connect/1` function, - which internally starts a `GRPC.Client.Connection` process under this supervisor. For example: - - iex> {:ok, ch} = GRPC.Stub.connect("127.0.0.1:50051") - iex> Grpc.Testing.TestService.Stub.empty_call(ch, %{}) - - ## Notes - - * You can dynamically start multiple connections under the supervisor for different targets. - * Each connection runs in isolation as its own GenServer. 
- """ - use DynamicSupervisor - - def start_link(opts) do - case DynamicSupervisor.start_link(__MODULE__, opts, name: __MODULE__) do - {:ok, _pid} = started -> - started - - {:error, {:already_started, pid}} -> - {:ok, pid} - - other -> - other - end - end - - @impl true - def init(_opts) do - DynamicSupervisor.init(strategy: :one_for_one) - end -end From a81fe1f28e9422946571ff29dfaf11e8151f67bc Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Fri, 12 Dec 2025 11:16:40 -0300 Subject: [PATCH 26/47] remove supervisor from tests --- grpc_client/test/grpc/supervisor_test.exs | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 grpc_client/test/grpc/supervisor_test.exs diff --git a/grpc_client/test/grpc/supervisor_test.exs b/grpc_client/test/grpc/supervisor_test.exs deleted file mode 100644 index cd83c871f..000000000 --- a/grpc_client/test/grpc/supervisor_test.exs +++ /dev/null @@ -1,14 +0,0 @@ -defmodule GRPC.Client.SupervisorTest do - use ExUnit.Case, async: false - - alias GRPC.Client - - describe "start_link/1" do - test "allows multiple start_links" do - {:ok, second_pid} = Client.Supervisor.start_link([]) - {:ok, third_pid} = Client.Supervisor.start_link([]) - - assert second_pid == third_pid - end - end -end From 9dd320c2c4c00c2d9f521ced350c48d847930ea1 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Fri, 12 Dec 2025 12:05:55 -0300 Subject: [PATCH 27/47] remove unused file --- benchmark/bin/profile.exs | 87 --------------------------------------- 1 file changed, 87 deletions(-) delete mode 100755 benchmark/bin/profile.exs diff --git a/benchmark/bin/profile.exs b/benchmark/bin/profile.exs deleted file mode 100755 index 45fa3fb61..000000000 --- a/benchmark/bin/profile.exs +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env elixir - -# Profile script to find bottlenecks in gRPC server -Mix.install([ - {:grpc, path: Path.expand("../..", __DIR__)}, - {:thousand_island, "~> 1.0"} -]) - -# Start the server -server_config = - 
Grpc.Testing.ServerConfig.new( - async_server_threads: 1, - port: 10000 - ) - -{:ok, _server_pid} = Benchmark.ServerManager.start_server(server_config) -Process.sleep(1000) - -# Connect client -{:ok, ch} = GRPC.Stub.connect("localhost:10000") - -payload_type = Grpc.Testing.PayloadType.value(:COMPRESSABLE) -req = Grpc.Testing.SimpleRequest.new( - response_type: payload_type, - response_size: 0, - payload: Grpc.Testing.Payload.new(type: payload_type, body: <<>>) -) - -# Warm up -IO.puts("Warming up...") -Enum.each(1..100, fn _ -> - Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) -end) - -IO.puts("\nStarting profiling with :fprof...") - -# Profile with fprof -:fprof.trace([:start, {:procs, :all}]) - -# Run workload -Enum.each(1..1000, fn _ -> - Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) -end) - -:fprof.trace(:stop) -:fprof.profile() -:fprof.analyse([:totals, {:sort, :acc}, {:dest, ~c"fprof_analysis.txt"}]) - -IO.puts("fprof analysis written to fprof_analysis.txt") - -# Now profile with eprof for better overview -IO.puts("\nStarting profiling with :eprof...") - -:eprof.start() -:eprof.start_profiling([self()]) - -Enum.each(1..1000, fn _ -> - Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) -end) - -:eprof.stop_profiling() -:eprof.analyze([:total, {:sort, :time}]) -:eprof.log(~c"eprof_analysis.txt") - -IO.puts("\neprof analysis written to eprof_analysis.txt") - -# Analyze specific modules -IO.puts("\nAnalyzing ThousandIsland adapter modules...") -:eprof.start_profiling([self()]) - -Enum.each(1..1000, fn _ -> - Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) -end) - -:eprof.stop_profiling() -:eprof.analyze([ - :total, - {:sort, :time}, - {:filter, [ - {GRPC.Server.Adapters.ThousandIsland.Handler, :_, :_}, - {GRPC.Server.HTTP2.Connection, :_, :_}, - {GRPC.Server.HTTP2.Dispatcher, :_, :_}, - {GRPC.Server.Cache, :_, :_} - ]} -]) - -IO.puts("\nDone! 
Check fprof_analysis.txt and eprof_analysis.txt for details.") From 692e8db3df1c50128cc66721e6bc31ba16f497be Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Fri, 12 Dec 2025 12:11:57 -0300 Subject: [PATCH 28/47] Update benchmark/config/config.exs Co-authored-by: Paulo Valente <16843419+polvalente@users.noreply.github.com> --- benchmark/config/config.exs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/config/config.exs b/benchmark/config/config.exs index 499dfb5fd..17041713a 100644 --- a/benchmark/config/config.exs +++ b/benchmark/config/config.exs @@ -1,6 +1,6 @@ import Config -# Disable logging by default for better performance +# Reduce logging overhead by default for better performance config :logger, level: :error import_config "#{Mix.env()}.exs" From 4e83b4a1f3345377d174b6fdafc39181476b0566 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Fri, 12 Dec 2025 12:12:06 -0300 Subject: [PATCH 29/47] Update benchmark/config/config.exs Co-authored-by: Paulo Valente <16843419+polvalente@users.noreply.github.com> --- benchmark/config/config.exs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/config/config.exs b/benchmark/config/config.exs index 17041713a..1246afe81 100644 --- a/benchmark/config/config.exs +++ b/benchmark/config/config.exs @@ -3,4 +3,4 @@ import Config # Reduce logging overhead by default for better performance config :logger, level: :error -import_config "#{Mix.env()}.exs" +import_config "#{config_env()}.exs" From 2a170544719172e9a19c46354c500cffcb3ed364 Mon Sep 17 00:00:00 2001 From: Paulo Valente <16843419+polvalente@users.noreply.github.com> Date: Mon, 15 Dec 2025 07:17:05 -0300 Subject: [PATCH 30/47] Update grpc_server/lib/grpc/server/http2/dispatcher.ex --- grpc_server/lib/grpc/server/http2/dispatcher.ex | 2 -- 1 file changed, 2 deletions(-) diff --git a/grpc_server/lib/grpc/server/http2/dispatcher.ex b/grpc_server/lib/grpc/server/http2/dispatcher.ex index 0746bfbb5..3a87ee8d3 100644 --- 
a/grpc_server/lib/grpc/server/http2/dispatcher.ex +++ b/grpc_server/lib/grpc/server/http2/dispatcher.ex @@ -104,8 +104,6 @@ defmodule GRPC.Server.HTTP2.Dispatcher do {:error, GRPC.RPCError.exception(status: :internal, message: "Internal server error")} end end - - # Fecha o if deadline_exceeded? end @doc """ From 03a35bcdfc50219e5f86aca0ff6e2ee56ccf7c92 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 11:29:18 -0300 Subject: [PATCH 31/47] adjusts --- benchmark/config/config.exs | 1 - 1 file changed, 1 deletion(-) diff --git a/benchmark/config/config.exs b/benchmark/config/config.exs index 499dfb5fd..2bb27886b 100644 --- a/benchmark/config/config.exs +++ b/benchmark/config/config.exs @@ -1,6 +1,5 @@ import Config -# Disable logging by default for better performance config :logger, level: :error import_config "#{Mix.env()}.exs" From 864590139e685118e9d889a7bbc974d680dbf69f Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 11:30:21 -0300 Subject: [PATCH 32/47] remove unused file --- benchmark/profile_script.exs | 34 ---------------------------------- 1 file changed, 34 deletions(-) delete mode 100644 benchmark/profile_script.exs diff --git a/benchmark/profile_script.exs b/benchmark/profile_script.exs deleted file mode 100644 index f2521cfc2..000000000 --- a/benchmark/profile_script.exs +++ /dev/null @@ -1,34 +0,0 @@ -# Script to profile gRPC server performance -# Run with: mix profile.eprof profile_script.exs -# Note: mix profile.eprof runs this script twice (warmup + profile) - -# Disable logging -Logger.configure(level: :error) - -# Start the server -{:ok, _} = Application.ensure_all_started(:grpc_server) -{:ok, _} = Application.ensure_all_started(:benchmark) - -server = %Grpc.Testing.ServerConfig{ - async_server_threads: 1, - port: 10000 -} - -_server = Benchmark.ServerManager.start_server(server, adapter: GRPC.Server.Adapters.ThousandIsland) -Process.sleep(500) - -# Connect client -{:ok, ch} = 
GRPC.Stub.connect("localhost:10000") - -# Prepare request -payload_type = Grpc.Testing.PayloadType.value(:COMPRESSABLE) -req = %Grpc.Testing.SimpleRequest{ - response_type: payload_type, - response_size: 0, - payload: %Grpc.Testing.Payload{type: payload_type, body: <<>>} -} - -# This code will be profiled -Enum.each(1..500, fn _ -> - Grpc.Testing.BenchmarkService.Stub.unary_call(ch, req) -end) From c2aabfa8f8cc8e7733e303954238a8d41f39ce86 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 14:29:46 -0300 Subject: [PATCH 33/47] =?UTF-8?q?removed=20portugu=C3=AAs?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../lib/grpc/server/http2/connection.ex | 48 ++++++------------- 1 file changed, 15 insertions(+), 33 deletions(-) diff --git a/grpc_server/lib/grpc/server/http2/connection.ex b/grpc_server/lib/grpc/server/http2/connection.ex index f4c618b1d..b6310116e 100644 --- a/grpc_server/lib/grpc/server/http2/connection.ex +++ b/grpc_server/lib/grpc/server/http2/connection.ex @@ -596,16 +596,20 @@ defmodule GRPC.Server.HTTP2.Connection do Logger.debug("[process_grpc_request] Bidi stream already started, feeding new messages") stream_state = extract_messages_from_buffer(stream_state) - # Feed messages to the BidiStream Task after decoding them + # Feed messages to the BidiStream Task in {flag, data} format (not decoded yet) if length(stream_state.message_buffer) > 0 and stream_state.bidi_stream_pid do Logger.info( - "[Connection] Decoding and feeding #{length(stream_state.message_buffer)} messages to BidiStream #{stream_state.stream_id}, pid=#{inspect(stream_state.bidi_stream_pid)}" + "[Connection] Feeding #{length(stream_state.message_buffer)} messages to BidiStream #{stream_state.stream_id}, pid=#{inspect(stream_state.bidi_stream_pid)}" ) - # Decode the messages using codec, compressor, and RPC from stream_state - decoded_messages = decode_stream_messages(stream_state.message_buffer, stream_state) + # 
Convert messages to {flag, data} format expected by GRPC.Server.do_handle_request + messages_as_tuples = + Enum.map(stream_state.message_buffer, fn %{compressed: compressed?, data: data} -> + flag = if compressed?, do: 1, else: 0 + {flag, data} + end) - GRPC.Server.BidiStream.put_messages(stream_state.bidi_stream_pid, decoded_messages) + GRPC.Server.BidiStream.put_messages(stream_state.bidi_stream_pid, messages_as_tuples) # Clear both message_buffer and data_buffer after feeding _stream_state = %{stream_state | message_buffer: [], data_buffer: <<>>} end @@ -924,27 +928,6 @@ defmodule GRPC.Server.HTTP2.Connection do {acc, buffer} end - defp decode_stream_messages(message_buffer, stream_state) do - # Extract request type from RPC definition - # RPC format: {name, {request_module, is_stream?}, {reply_module, is_stream?}, options} - {_name, {request_module, _is_stream?}, _reply, _options} = stream_state.rpc - codec = stream_state.codec - compressor = stream_state.compressor - - Enum.map(message_buffer, fn %{compressed: compressed?, data: data} -> - # Decompress if needed - data = - if compressed? 
and compressor do - compressor.decompress(data) - else - data - end - - # Decode protobuf with the request module - codec.decode(data, request_module) - end) - end - # Made public so Handler can call it when deadline exceeded during send_reply def send_grpc_error(socket, stream_id, error, connection) do status = Map.get(error, :status, :unknown) @@ -955,7 +938,6 @@ defmodule GRPC.Server.HTTP2.Connection do stream_state = Map.get(connection.streams, stream_id) - # Se o stream não existe OU já enviamos erro, não fazer nada if !stream_state do Logger.debug("[SEND_GRPC_ERROR] stream=#{stream_id} SKIPPED - stream not found") connection @@ -971,9 +953,9 @@ defmodule GRPC.Server.HTTP2.Connection do "[SEND_GRPC_ERROR] stream=#{stream_id}, status=#{status_code}, message=#{message}, headers_sent=#{headers_sent}, end_stream_received=#{end_stream_received}" ) - # RFC 7540 Section 5.1: Se já enviamos END_STREAM (via headers_sent=true em resposta anterior), - # o stream está "closed" e NÃO podemos enviar mais frames (exceto PRIORITY) - # Nesse caso, apenas remover o stream e não enviar nada + # RFC 7540 Section 5.1: If we already sent END_STREAM (via headers_sent=true in previous response), + # the stream is "closed" and we CANNOT send more frames (except PRIORITY) + # In this case, just remove the stream and don't send anything if headers_sent && end_stream_received do Logger.debug( "[SEND_GRPC_ERROR] stream=#{stream_id} SKIPPED - stream is fully closed (both sides sent END_STREAM)" @@ -1033,7 +1015,7 @@ defmodule GRPC.Server.HTTP2.Connection do # Verify stream still exists before marking error_sent (may have been closed by RST_STREAM) updated_connection = if Map.has_key?(updated_connection.streams, stream_id) do - # Marcar que já enviamos erro (RFC 7540: após END_STREAM, o stream está closed) + # Mark that we already sent error (RFC 7540: after END_STREAM, the stream is closed) update_in(updated_connection.streams[stream_id], fn s -> if s, do: %{s | error_sent: true}, else: 
nil end) @@ -1045,8 +1027,8 @@ defmodule GRPC.Server.HTTP2.Connection do updated_connection end - # RFC 7540: Após enviar END_STREAM, o stream transiciona para "closed" - # Remover imediatamente para evitar processar mais mensagens neste stream + # RFC 7540: After sending END_STREAM, the stream transitions to "closed" + # Remove immediately to avoid processing more messages on this stream Logger.warning("[REMOVE_STREAM] stream=#{stream_id} - removed after sending error") %{updated_connection | streams: Map.delete(updated_connection.streams, stream_id)} end From 1a29fc11825146de96ef274e77b3e0ce4362f54f Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 14:30:17 -0300 Subject: [PATCH 34/47] fix: bid_stream integration test & run more than one adapter in run_server --- .../thousand_island_adapter_test.exs | 269 ++++++++++-------- .../test/support/integration_data_case.ex | 12 +- .../grpc/server/adapters/thousand_island.ex | 5 + .../adapters/thousand_island/handler.ex | 1 + .../lib/grpc/server/http2/dispatcher.ex | 64 +++-- grpc_server/lib/grpc/stream.ex | 6 +- 6 files changed, 217 insertions(+), 140 deletions(-) diff --git a/grpc_client/test/grpc/integration/thousand_island_adapter_test.exs b/grpc_client/test/grpc/integration/thousand_island_adapter_test.exs index 53e543230..804562b9b 100644 --- a/grpc_client/test/grpc/integration/thousand_island_adapter_test.exs +++ b/grpc_client/test/grpc/integration/thousand_island_adapter_test.exs @@ -5,7 +5,10 @@ defmodule GRPC.Integration.ThousandIslandAdapterTest do use GRPC.Integration.TestCase - # Test server for unary RPC + setup do + {:ok, adapter_opts: [adapter: GRPC.Server.Adapters.ThousandIsland]} + end + defmodule HelloServer do use GRPC.Server, service: Helloworld.Greeter.Service @@ -18,9 +21,9 @@ defmodule GRPC.Integration.ThousandIslandAdapterTest do end end - # Test server for all streaming types defmodule RouteServer do use GRPC.Server, service: Routeguide.RouteGuide.Service + require Logger def 
get_feature(point, materializer) do GRPC.Stream.unary(point, materializer: materializer) @@ -63,7 +66,7 @@ defmodule GRPC.Integration.ThousandIslandAdapterTest do GRPC.Stream.from(note_stream) |> GRPC.Stream.map(fn note -> %Routeguide.RouteNote{ - location: note.location, + # location: note.location, message: "Echo: #{note.message}" } end) @@ -72,159 +75,191 @@ defmodule GRPC.Integration.ThousandIslandAdapterTest do end describe "ThousandIsland adapter - unary RPC" do - test "handles simple unary request/response" do - run_server([HelloServer], fn port -> - {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + test "handles simple unary request/response", %{adapter_opts: adapter_opts} do + run_server( + [HelloServer], + fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") - request = %Helloworld.HelloRequest{name: "ThousandIsland"} - {:ok, response} = channel |> Helloworld.Greeter.Stub.say_hello(request) + request = %Helloworld.HelloRequest{name: "ThousandIsland"} + {:ok, response} = channel |> Helloworld.Greeter.Stub.say_hello(request) - assert response.message == "Hello ThousandIsland!" + assert response.message == "Hello ThousandIsland!" - GRPC.Stub.disconnect(channel) - end) + GRPC.Stub.disconnect(channel) + end, + 0, + adapter_opts + ) end - test "handles multiple sequential unary calls" do - run_server([HelloServer], fn port -> - {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") - - for i <- 1..10 do - request = %Helloworld.HelloRequest{name: "User#{i}"} - {:ok, response} = channel |> Helloworld.Greeter.Stub.say_hello(request) - assert response.message == "Hello User#{i}!" 
- end - - GRPC.Stub.disconnect(channel) - end) + test "handles multiple sequential unary calls", %{adapter_opts: adapter_opts} do + run_server( + [HelloServer], + fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + for i <- 1..10 do + request = %Helloworld.HelloRequest{name: "User#{i}"} + {:ok, response} = channel |> Helloworld.Greeter.Stub.say_hello(request) + assert response.message == "Hello User#{i}!" + end + + GRPC.Stub.disconnect(channel) + end, + 0, + adapter_opts + ) end end describe "ThousandIsland adapter - server streaming RPC" do - test "receives multiple responses from server" do - run_server([RouteServer], fn port -> - {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") - - rectangle = %Routeguide.Rectangle{ - lo: %Routeguide.Point{latitude: 0, longitude: 0}, - hi: %Routeguide.Point{latitude: 100, longitude: 100} - } + test "receives multiple responses from server", %{adapter_opts: adapter_opts} do + run_server( + [RouteServer], + fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + rectangle = %Routeguide.Rectangle{ + lo: %Routeguide.Point{latitude: 0, longitude: 0}, + hi: %Routeguide.Point{latitude: 100, longitude: 100} + } - {:ok, stream} = channel |> Routeguide.RouteGuide.Stub.list_features(rectangle) + {:ok, stream} = channel |> Routeguide.RouteGuide.Stub.list_features(rectangle) - features = stream |> Enum.map(fn {:ok, f} -> f end) |> Enum.to_list() + features = stream |> Enum.map(fn {:ok, f} -> f end) |> Enum.to_list() - assert length(features) == 5 + assert length(features) == 5 - Enum.each(1..5, fn i -> - feature = Enum.at(features, i - 1) - assert feature.name == "Feature #{i}" - assert feature.location.latitude == i * 10 - assert feature.location.longitude == i * 20 - end) + Enum.each(1..5, fn i -> + feature = Enum.at(features, i - 1) + assert feature.name == "Feature #{i}" + assert feature.location.latitude == i * 10 + assert feature.location.longitude == i * 20 + end) - 
GRPC.Stub.disconnect(channel) - end) + GRPC.Stub.disconnect(channel) + end, + 0, + adapter_opts + ) end end describe "ThousandIsland adapter - client streaming RPC" do - test "sends multiple requests and receives single response" do - run_server([RouteServer], fn port -> - {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") - - points = [ - %Routeguide.Point{latitude: 10, longitude: 20}, - %Routeguide.Point{latitude: 30, longitude: 40}, - %Routeguide.Point{latitude: 50, longitude: 60} - ] - - stream = channel |> Routeguide.RouteGuide.Stub.record_route() - - Enum.each(points, fn point -> - GRPC.Stub.send_request(stream, point) - end) + test "sends multiple requests and receives single response", %{adapter_opts: adapter_opts} do + run_server( + [RouteServer], + fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + points = [ + %Routeguide.Point{latitude: 10, longitude: 20}, + %Routeguide.Point{latitude: 30, longitude: 40}, + %Routeguide.Point{latitude: 50, longitude: 60} + ] + + stream = channel |> Routeguide.RouteGuide.Stub.record_route() + + Enum.each(points, fn point -> + GRPC.Stub.send_request(stream, point) + end) - GRPC.Stub.end_stream(stream) + GRPC.Stub.end_stream(stream) - {:ok, summary} = GRPC.Stub.recv(stream) + {:ok, summary} = GRPC.Stub.recv(stream) - assert summary.point_count == 3 - assert summary.feature_count == 3 - assert summary.distance == 300 + assert summary.point_count == 3 + assert summary.feature_count == 3 + assert summary.distance == 300 - GRPC.Stub.disconnect(channel) - end) + GRPC.Stub.disconnect(channel) + end, + 0, + adapter_opts + ) end end describe "ThousandIsland adapter - bidirectional streaming RPC" do - test "exchanges messages bidirectionally" do - run_server([RouteServer], fn port -> - {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") - - notes = [ - %Routeguide.RouteNote{ - location: %Routeguide.Point{latitude: 1, longitude: 2}, - message: "First note" - }, - %Routeguide.RouteNote{ - location: 
%Routeguide.Point{latitude: 3, longitude: 4}, - message: "Second note" - }, - %Routeguide.RouteNote{ - location: %Routeguide.Point{latitude: 5, longitude: 6}, - message: "Third note" - } - ] - - bidi_stream = channel |> Routeguide.RouteGuide.Stub.route_chat() - - Enum.each(notes, fn note -> - GRPC.Stub.send_request(bidi_stream, note) - end) + test "exchanges messages bidirectionally", %{adapter_opts: adapter_opts} do + run_server( + [RouteServer], + fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + notes = [ + %Routeguide.RouteNote{ + location: %Routeguide.Point{latitude: 1, longitude: 2}, + message: "First note" + }, + %Routeguide.RouteNote{ + location: %Routeguide.Point{latitude: 3, longitude: 4}, + message: "Second note" + }, + %Routeguide.RouteNote{ + location: %Routeguide.Point{latitude: 5, longitude: 6}, + message: "Third note" + } + ] + + bidi_stream = channel |> Routeguide.RouteGuide.Stub.route_chat() + + Enum.each(notes, fn note -> + GRPC.Stub.send_request(bidi_stream, note) + end) - GRPC.Stub.end_stream(bidi_stream) + GRPC.Stub.end_stream(bidi_stream) - {:ok, response_stream} = GRPC.Stub.recv(bidi_stream) - responses = response_stream |> Enum.map(fn {:ok, r} -> r end) |> Enum.to_list() + {:ok, response_stream} = GRPC.Stub.recv(bidi_stream) + responses = response_stream |> Enum.map(fn {:ok, r} -> r end) |> Enum.to_list() - assert length(responses) == 3 - assert Enum.at(responses, 0).message == "Echo: First note" - assert Enum.at(responses, 1).message == "Echo: Second note" - assert Enum.at(responses, 2).message == "Echo: Third note" + assert length(responses) == 3 + assert Enum.at(responses, 0).message == "Echo: First note" + assert Enum.at(responses, 1).message == "Echo: Second note" + assert Enum.at(responses, 2).message == "Echo: Third note" - GRPC.Stub.disconnect(channel) - end) + GRPC.Stub.disconnect(channel) + end, + 0, + adapter_opts + ) end end describe "ThousandIsland adapter - HTTP/2 protocol validation" do - test "handles 
multiple concurrent unary calls on same connection" do - run_server([HelloServer], fn port -> - {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") - - tasks = - 1..10 - |> Enum.map(fn i -> - Task.async(fn -> - request = %Helloworld.HelloRequest{name: "Concurrent#{i}"} - {:ok, response} = channel |> Helloworld.Greeter.Stub.say_hello(request) - response + test "handles multiple concurrent unary calls on same connection", %{ + adapter_opts: adapter_opts + } do + run_server( + [HelloServer], + fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}") + + tasks = + 1..10 + |> Enum.map(fn i -> + Task.async(fn -> + request = %Helloworld.HelloRequest{name: "Concurrent#{i}"} + {:ok, response} = channel |> Helloworld.Greeter.Stub.say_hello(request) + response + end) end) - end) - responses = Task.await_many(tasks, 5000) + responses = Task.await_many(tasks, 5000) - assert length(responses) == 10 + assert length(responses) == 10 - Enum.each(1..10, fn i -> - response = Enum.find(responses, fn r -> r.message == "Hello Concurrent#{i}!" end) - assert response != nil - end) + Enum.each(1..10, fn i -> + response = Enum.find(responses, fn r -> r.message == "Hello Concurrent#{i}!" 
end) + assert response != nil + end) - GRPC.Stub.disconnect(channel) - end) + GRPC.Stub.disconnect(channel) + end, + 0, + adapter_opts + ) end end end diff --git a/grpc_client/test/support/integration_data_case.ex b/grpc_client/test/support/integration_data_case.ex index b51aecd19..69db0dc8a 100644 --- a/grpc_client/test/support/integration_data_case.ex +++ b/grpc_client/test/support/integration_data_case.ex @@ -22,15 +22,17 @@ defmodule GRPC.Integration.TestCase do try do func.(port) after - :ok = GRPC.Server.stop(servers) + # GRPC.Server.stop only accepts :adapter option + stop_opts = Keyword.take(opts, [:adapter]) + :ok = GRPC.Server.stop(servers, stop_opts) end end - def run_endpoint(endpoint, func, port \\ 0) do + def run_endpoint(endpoint, func, port \\ 0, opts \\ []) do {:ok, _pid, port} = start_supervised(%{ id: {GRPC.Server, System.unique_integer([:positive])}, - start: {GRPC.Server, :start_endpoint, [endpoint, port]}, + start: {GRPC.Server, :start_endpoint, [endpoint, port, opts]}, type: :worker, restart: :permanent, shutdown: 500 @@ -39,7 +41,9 @@ defmodule GRPC.Integration.TestCase do try do func.(port) after - :ok = GRPC.Server.stop_endpoint(endpoint, []) + # GRPC.Server.stop_endpoint only accepts :adapter option + stop_opts = Keyword.take(opts, [:adapter]) + :ok = GRPC.Server.stop_endpoint(endpoint, stop_opts) end end diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island.ex b/grpc_server/lib/grpc/server/adapters/thousand_island.ex index 6ad9c49e2..8a679d0f5 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island.ex @@ -454,6 +454,11 @@ defmodule GRPC.Server.Adapters.ThousandIsland do end @spec reading_stream(GRPC.Server.Adapter.state()) :: Enumerable.t() + def reading_stream(%{stream_state: %{bidi_stream_pid: bidi_pid}}) when not is_nil(bidi_pid) do + # For bidi streaming, return the lazy stream from BidiStream + GRPC.Server.BidiStream.to_enum(bidi_pid) + end + def 
reading_stream(%{data: data}) do # Create a stream that yields the data once Stream.unfold({data, false}, fn diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex index 28ed344e3..1aa552ae1 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex @@ -114,6 +114,7 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do new_state = if map_size(accumulated) > 0 do updated_conn = Connection.send_headers(socket, stream_id, accumulated, state.connection) + %{ state | accumulated_headers: Map.delete(state.accumulated_headers, stream_id), diff --git a/grpc_server/lib/grpc/server/http2/dispatcher.ex b/grpc_server/lib/grpc/server/http2/dispatcher.ex index 3a87ee8d3..7a8695c94 100644 --- a/grpc_server/lib/grpc/server/http2/dispatcher.ex +++ b/grpc_server/lib/grpc/server/http2/dispatcher.ex @@ -271,7 +271,10 @@ defmodule GRPC.Server.HTTP2.Dispatcher do Logger.info("[call_unary] Response received, sending via send_reply") # Send response using async message (this will send accumulated headers first) - GRPC.Server.send_reply(stream, response) + # :noreply means GRPC.Stream.run() already sent the response + if response != :noreply do + GRPC.Server.send_reply(stream, response) + end # Get custom trailers from process dictionary (set by handler via set_trailers) stream_id = stream.payload.stream_id @@ -521,21 +524,29 @@ defmodule GRPC.Server.HTTP2.Dispatcher do end end - defp call_bidi_streaming(server, func_name, requests, stream, _connection) do + defp call_bidi_streaming(server, rpc, func_name, stream_state, stream, _connection) do # Check if function is implemented if function_exported?(server, func_name, 2) do - stream_id = stream.payload.stream_state.stream_id + stream_id = stream_state.stream_id + message_buffer = stream_state.message_buffer Logger.info( - "[call_bidi_streaming] Starting bidi 
stream #{stream_id} with #{length(requests)} initial requests" + "[call_bidi_streaming] Starting bidi stream #{stream_id} with #{length(message_buffer)} initial requests" ) try do # Mark as streaming mode so send_headers will send immediately Process.put(:grpc_streaming_mode, true) - # Start BidiStream Task with initial messages - {:ok, bidi_pid} = GRPC.Server.BidiStream.start_link(stream_id, requests) + # Convert initial messages to {flag, data} format + initial_messages = + Enum.map(message_buffer, fn %{compressed: compressed?, data: data} -> + flag = if compressed?, do: 1, else: 0 + {flag, data} + end) + + # Start BidiStream Task with initial messages in {flag, data} format + {:ok, bidi_pid} = GRPC.Server.BidiStream.start_link(stream_id, initial_messages) Logger.info( "[call_bidi_streaming] BidiStream task started for stream #{stream_id}, pid=#{inspect(bidi_pid)}" @@ -570,9 +581,6 @@ defmodule GRPC.Server.HTTP2.Dispatcher do Process.put({:bidi_stream_pid, stream_id}, bidi_pid) Process.put({:bidi_stream_state, stream_id}, updated_stream_state) - # Create lazy enumerable from BidiStream - request_enum = GRPC.Server.BidiStream.to_enum(bidi_pid) - # Accumulate base headers (don't send yet - handler may add custom headers) base_headers = %{ ":status" => "200", @@ -590,23 +598,39 @@ defmodule GRPC.Server.HTTP2.Dispatcher do # Accumulate base headers without sending GRPC.Server.set_headers(stream, base_headers) + # Update the stream's payload to include bidi_stream_pid so adapter.reading_stream() can access it + updated_stream = %{ + stream + | payload: %{stream.payload | stream_state: updated_stream_state} + } + # CRITICAL: Run handler in a separate task to not block the connection handler # The connection handler MUST continue processing incoming DATA frames # and feed them to the BidiStream while the handler is consuming messages Task.start(fn -> try do - # Handler receives lazy request stream and sends responses via stream - result = apply(server, func_name, 
[request_enum, stream]) + # Use GRPC.Server.call to properly handle the request + # This ensures the reading_stream is created correctly via adapter.reading_stream + result = GRPC.Server.call(server, updated_stream, rpc, func_name) Logger.info("[call_bidi_streaming] Handler returned: #{inspect(result)}") - # Get custom trailers from process dictionary (set by handler via set_trailers) - custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) + case result do + {:ok, stream} -> + # Get custom trailers from process dictionary + custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) + trailers = Map.merge(%{"grpc-status" => "0"}, custom_trailers) + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + + {:error, error} -> + Logger.error("[call_bidi_streaming] Handler error result: #{inspect(error)}") - # Merge with mandatory grpc-status - trailers = Map.merge(%{"grpc-status" => "0"}, custom_trailers) + trailers = %{ + "grpc-status" => "#{error.status || 2}", + "grpc-message" => error.message || "Handler error" + } - # Send trailers at the end with END_STREAM - GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + end rescue e in GRPC.RPCError -> Logger.error("[call_bidi_streaming] Handler RPC Error: #{inspect(e)}") @@ -780,6 +804,9 @@ defmodule GRPC.Server.HTTP2.Dispatcher do {_name, {req_mod, req_stream?}, {res_mod, res_stream?}, _opts} = rpc func_name = Macro.underscore(method_name) |> String.to_atom() + # Determine gRPC type based on streaming flags + grpc_type = GRPC.Service.grpc_type(rpc) + # Create a payload struct with metadata and handler info for streaming payload = %{ headers: stream_state.metadata, @@ -791,6 +818,7 @@ defmodule GRPC.Server.HTTP2.Dispatcher do grpc_stream = %GRPC.Server.Stream{ server: server, endpoint: endpoint, + grpc_type: grpc_type, request_mod: req_mod, response_mod: res_mod, rpc: 
rpc, @@ -813,7 +841,7 @@ defmodule GRPC.Server.HTTP2.Dispatcher do call_server_streaming(server, func_name, request, grpc_stream) {true, true} -> - call_bidi_streaming(server, func_name, requests, grpc_stream, connection) + call_bidi_streaming(server, rpc, func_name, stream_state, grpc_stream, connection) end end end diff --git a/grpc_server/lib/grpc/stream.ex b/grpc_server/lib/grpc/stream.ex index 84b115ba1..9889b417a 100644 --- a/grpc_server/lib/grpc/stream.ex +++ b/grpc_server/lib/grpc/stream.ex @@ -542,7 +542,11 @@ defmodule GRPC.Stream do dry_run? = Keyword.get(opts, :dry_run, false) if not dry_run? do - GRPC.Server.send_reply(from, msg) + # RPCError should be raised, not sent as reply + case msg do + %GRPC.RPCError{} -> raise msg + _ -> GRPC.Server.send_reply(from, msg) + end end end end From 8f3e833e2b6694f0b3bd4e9ede72c4cc1938234e Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 14:33:09 -0300 Subject: [PATCH 35/47] chore: better comment --- grpc_core/lib/grpc/message.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grpc_core/lib/grpc/message.ex b/grpc_core/lib/grpc/message.ex index 8fb95df6b..631d48d89 100644 --- a/grpc_core/lib/grpc/message.ex +++ b/grpc_core/lib/grpc/message.ex @@ -15,7 +15,7 @@ defmodule GRPC.Message do @max_message_length Bitwise.bsl(1, 32 - 1) - # Inline hot path functions + # Inline hot path functions, this reduces between 07-10% of overhead in benchmarks @compile {:inline, to_data: 2, from_data: 1} @doc """ From b876a386cc3f76592985b0a1c60f20e79803a2ef Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 15:05:02 -0300 Subject: [PATCH 36/47] refactor: remove hexa notation --- grpc_core/lib/grpc/message.ex | 49 +++++++++---------- grpc_core/lib/grpc/transport/http2/frame.ex | 41 ++++++---------- .../transport/http2/frame/continuation.ex | 4 +- .../lib/grpc/transport/http2/frame/data.ex | 6 +-- .../lib/grpc/transport/http2/frame/flags.ex | 11 +++++ 
.../lib/grpc/transport/http2/frame/goaway.ex | 2 +- .../lib/grpc/transport/http2/frame/headers.ex | 12 ++--- .../lib/grpc/transport/http2/frame/ping.ex | 6 +-- .../grpc/transport/http2/frame/priority.ex | 2 +- .../transport/http2/frame/push_promise.ex | 4 +- .../grpc/transport/http2/frame/rst_stream.ex | 2 +- .../grpc/transport/http2/frame/settings.ex | 36 +++++++------- .../transport/http2/frame/window_update.ex | 2 +- 13 files changed, 88 insertions(+), 89 deletions(-) create mode 100644 grpc_core/lib/grpc/transport/http2/frame/flags.ex diff --git a/grpc_core/lib/grpc/message.ex b/grpc_core/lib/grpc/message.ex index 631d48d89..2e0b3f0c5 100644 --- a/grpc_core/lib/grpc/message.ex +++ b/grpc_core/lib/grpc/message.ex @@ -55,31 +55,30 @@ defmodule GRPC.Message do length = IO.iodata_length(compressed_message) - case length > max_length do - true -> - {:error, "Encoded message is too large (#{length} bytes)"} - - false -> - result = [compress_flag, <>, compressed_message] - - result = - case opts[:codec] do - nil -> - result - - codec when is_atom(codec) -> - if function_exported?(codec, :pack_for_channel, 1), - do: codec.pack_for_channel(result), - else: result - end - - result = - case opts[:iolist] do - true -> result - _ -> IO.iodata_to_binary(result) - end - - {:ok, result, length + 5} + if length <= max_length do + result = [compress_flag, <>, compressed_message] + + result = + if opts[:codec] != nil and is_atom(opts[:codec]) do + codec = opts[:codec] + + if function_exported?(codec, :pack_for_channel, 1), + do: codec.pack_for_channel(result), + else: result + else + result + end + + result = + if opts[:iolist] == true do + result + else + IO.iodata_to_binary(result) + end + + {:ok, result, length + 5} + else + {:error, "Encoded message is too large (#{length} bytes)"} end end diff --git a/grpc_core/lib/grpc/transport/http2/frame.ex b/grpc_core/lib/grpc/transport/http2/frame.ex index a93a3ba91..64829cbd8 100644 --- a/grpc_core/lib/grpc/transport/http2/frame.ex 
# Dispatches a raw HTTP/2 frame to the decoder for its frame type code
# (RFC 9113 §6). Types 0x0..0x9 map to the standard frame modules; any
# other type code is handed to the Unknown frame decoder.
defp deserialize_frame_by_type(0, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.Data.deserialize(flags, stream_id, payload)

defp deserialize_frame_by_type(1, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.Headers.deserialize(flags, stream_id, payload)

defp deserialize_frame_by_type(2, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.Priority.deserialize(flags, stream_id, payload)

defp deserialize_frame_by_type(3, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.RstStream.deserialize(flags, stream_id, payload)

defp deserialize_frame_by_type(4, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.Settings.deserialize(flags, stream_id, payload)

defp deserialize_frame_by_type(5, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.PushPromise.deserialize(flags, stream_id, payload)

defp deserialize_frame_by_type(6, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.Ping.deserialize(flags, stream_id, payload)

defp deserialize_frame_by_type(7, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.Goaway.deserialize(flags, stream_id, payload)

defp deserialize_frame_by_type(8, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.WindowUpdate.deserialize(flags, stream_id, payload)

defp deserialize_frame_by_type(9, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.Continuation.deserialize(flags, stream_id, payload)

defp deserialize_frame_by_type(type, flags, stream_id, payload),
  do: GRPC.Transport.HTTP2.Frame.Unknown.deserialize(type, flags, stream_id, payload)
defmodule GRPC.Transport.HTTP2.Frame.Flags do
  @moduledoc false
  # Helpers for the 8-bit flags field carried by every HTTP/2 frame header.

  import Bitwise

  # Guard-safe check: true when bit position `bit` (0..7) is set in `flags`.
  defguard set?(flags, bit) when band(flags, bsl(1, bit)) != 0

  @doc false
  # Builds a flags byte by OR-ing together the given bit positions.
  @spec set(list(0..7)) :: 0..255
  def set(bit_positions) do
    Enum.reduce(bit_positions, 0x0, &bor(bsl(1, &1), &2))
  end
end
a/grpc_core/lib/grpc/transport/http2/frame/headers.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/headers.ex @@ -52,7 +52,7 @@ defmodule GRPC.Transport.HTTP2.Frame.Headers do stream_id: stream_id, end_stream: set?(flags, @end_stream_bit), end_headers: set?(flags, @end_headers_bit), - exclusive_dependency: exclusive_dependency == 0x01, + exclusive_dependency: exclusive_dependency == 1, stream_dependency: stream_dependency, weight: weight, fragment: binary_part(rest, 0, byte_size(rest) - padding_length) @@ -61,7 +61,7 @@ defmodule GRPC.Transport.HTTP2.Frame.Headers do # Padding but not priority def deserialize(flags, stream_id, <>) - when set?(flags, @padding_bit) and clear?(flags, @priority_bit) and + when set?(flags, @padding_bit) and not set?(flags, @priority_bit) and byte_size(rest) >= padding_length do {:ok, %__MODULE__{ @@ -90,7 +90,7 @@ defmodule GRPC.Transport.HTTP2.Frame.Headers do stream_id: stream_id, end_stream: set?(flags, @end_stream_bit), end_headers: set?(flags, @end_headers_bit), - exclusive_dependency: exclusive_dependency == 0x01, + exclusive_dependency: exclusive_dependency == 1, stream_dependency: stream_dependency, weight: weight, fragment: fragment @@ -98,7 +98,7 @@ defmodule GRPC.Transport.HTTP2.Frame.Headers do end def deserialize(flags, stream_id, <>) - when clear?(flags, @priority_bit) and clear?(flags, @padding_bit) do + when not set?(flags, @priority_bit) and not set?(flags, @padding_bit) do {:ok, %__MODULE__{ stream_id: stream_id, @@ -126,13 +126,13 @@ defmodule GRPC.Transport.HTTP2.Frame.Headers do fragment_length = IO.iodata_length(frame.fragment) if fragment_length <= max_frame_size do - [{0x1, set([@end_headers_bit | flags]), frame.stream_id, frame.fragment}] + [{1, set([@end_headers_bit | flags]), frame.stream_id, frame.fragment}] else <> = IO.iodata_to_binary(frame.fragment) [ - {0x1, set(flags), frame.stream_id, this_frame} + {1, set(flags), frame.stream_id, this_frame} | GRPC.Transport.HTTP2.Frame.Serializable.serialize( 
@ack_bit 0

# Serializes a PING frame. PING always travels on stream 0 and carries its
# payload unchanged; the ACK flag distinguishes a reply from a request.
def serialize(%GRPC.Transport.HTTP2.Frame.Ping{} = frame, _max_frame_size) do
  flags = if frame.ack, do: set([@ack_bit]), else: 0
  [{6, flags, 0, frame.payload}]
end
promised_stream_id::31, fragment::binary>>) - when clear?(flags, @padding_bit) do + when not set?(flags, @padding_bit) do {:ok, %__MODULE__{ stream_id: stream_id, @@ -67,7 +67,7 @@ defmodule GRPC.Transport.HTTP2.Frame.PushPromise do def serialize(%GRPC.Transport.HTTP2.Frame.PushPromise{} = frame, _max_frame_size) do payload = <<0::1, frame.promised_stream_id::31, frame.fragment::binary>> - [{0x5, set([@end_headers_bit]), frame.stream_id, payload}] + [{5, set([@end_headers_bit]), frame.stream_id, payload}] end end end diff --git a/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex b/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex index b9c9cede5..e316dbc84 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/rst_stream.ex @@ -31,7 +31,7 @@ defmodule GRPC.Transport.HTTP2.Frame.RstStream do defimpl GRPC.Transport.HTTP2.Frame.Serializable do def serialize(%GRPC.Transport.HTTP2.Frame.RstStream{} = frame, _max_frame_size) do - [{0x3, 0x0, frame.stream_id, <>}] + [{3, 0, frame.stream_id, <>}] end end end diff --git a/grpc_core/lib/grpc/transport/http2/frame/settings.ex b/grpc_core/lib/grpc/transport/http2/frame/settings.ex index 41d7415a6..5d8d9939b 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/settings.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/settings.ex @@ -21,7 +21,7 @@ defmodule GRPC.Transport.HTTP2.Frame.Settings do iodata() ) :: {:ok, t()} | {:error, GRPC.Transport.HTTP2.Errors.error_code(), binary()} - def deserialize(flags, 0, payload) when clear?(flags, @ack_bit) do + def deserialize(flags, 0, payload) when not set?(flags, @ack_bit) do payload |> Stream.unfold(fn <<>> -> nil @@ -29,42 +29,42 @@ defmodule GRPC.Transport.HTTP2.Frame.Settings do <> -> {{:error, rest}, <<>>} end) |> Enum.reduce_while({:ok, %{}}, fn - {:ok, {0x01, value}}, {:ok, acc} -> + {:ok, {1, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :header_table_size, value)}} - {:ok, {0x02, val}}, {:ok, acc} when 
val in [0x00, 0x01] -> + {:ok, {2, val}}, {:ok, acc} when val in [0, 1] -> {:cont, {:ok, acc}} - {:ok, {0x02, _value}}, {:ok, _acc} -> + {:ok, {2, _value}}, {:ok, _acc} -> {:halt, {:error, GRPC.Transport.HTTP2.Errors.protocol_error(), "Invalid enable_push value (RFC9113§6.5)"}} - {:ok, {0x03, value}}, {:ok, acc} -> + {:ok, {3, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :max_concurrent_streams, value)}} - {:ok, {0x04, value}}, {:ok, _acc} when value > @max_window_size -> + {:ok, {4, value}}, {:ok, _acc} when value > @max_window_size -> {:halt, {:error, GRPC.Transport.HTTP2.Errors.flow_control_error(), "Invalid window_size (RFC9113§6.5)"}} - {:ok, {0x04, value}}, {:ok, acc} -> + {:ok, {4, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :initial_window_size, value)}} - {:ok, {0x05, value}}, {:ok, _acc} when value < @min_frame_size -> + {:ok, {5, value}}, {:ok, _acc} when value < @min_frame_size -> {:halt, {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} - {:ok, {0x05, value}}, {:ok, _acc} when value > @max_frame_size -> + {:ok, {5, value}}, {:ok, _acc} when value > @max_frame_size -> {:halt, {:error, GRPC.Transport.HTTP2.Errors.frame_size_error(), "Invalid max_frame_size (RFC9113§6.5)"}} - {:ok, {0x05, value}}, {:ok, acc} -> + {:ok, {5, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :max_frame_size, value)}} - {:ok, {0x06, value}}, {:ok, acc} -> + {:ok, {6, value}}, {:ok, acc} -> {:cont, {:ok, Map.put(acc, :max_header_list_size, value)}} {:ok, {_setting, _value}}, {:ok, acc} -> @@ -98,7 +98,7 @@ defmodule GRPC.Transport.HTTP2.Frame.Settings do @ack_bit 0 def serialize(%GRPC.Transport.HTTP2.Frame.Settings{ack: true}, _max_frame_size), - do: [{0x4, set([@ack_bit]), 0, <<>>}] + do: [{4, set([@ack_bit]), 0, <<>>}] def serialize(%GRPC.Transport.HTTP2.Frame.Settings{ack: false} = frame, _max_frame_size) do payload = @@ -106,18 +106,18 @@ defmodule GRPC.Transport.HTTP2.Frame.Settings do |> Enum.uniq_by(fn {setting, 
_} -> setting end) |> Enum.map(fn {:header_table_size, 4_096} -> <<>> - {:header_table_size, value} -> <<0x01::16, value::32>> + {:header_table_size, value} -> <<1::16, value::32>> {:max_concurrent_streams, :infinity} -> <<>> - {:max_concurrent_streams, value} -> <<0x03::16, value::32>> + {:max_concurrent_streams, value} -> <<3::16, value::32>> {:initial_window_size, 65_535} -> <<>> - {:initial_window_size, value} -> <<0x04::16, value::32>> + {:initial_window_size, value} -> <<4::16, value::32>> {:max_frame_size, 16_384} -> <<>> - {:max_frame_size, value} -> <<0x05::16, value::32>> + {:max_frame_size, value} -> <<5::16, value::32>> {:max_header_list_size, :infinity} -> <<>> - {:max_header_list_size, value} -> <<0x06::16, value::32>> + {:max_header_list_size, value} -> <<6::16, value::32>> end) - [{0x4, 0x0, 0, payload}] + [{4, 0, 0, payload}] end end end diff --git a/grpc_core/lib/grpc/transport/http2/frame/window_update.ex b/grpc_core/lib/grpc/transport/http2/frame/window_update.ex index b24810e1e..bfb369230 100644 --- a/grpc_core/lib/grpc/transport/http2/frame/window_update.ex +++ b/grpc_core/lib/grpc/transport/http2/frame/window_update.ex @@ -31,7 +31,7 @@ defmodule GRPC.Transport.HTTP2.Frame.WindowUpdate do defimpl GRPC.Transport.HTTP2.Frame.Serializable do def serialize(%GRPC.Transport.HTTP2.Frame.WindowUpdate{} = frame, _max_frame_size) do - [{0x8, 0x0, frame.stream_id, <<0::1, frame.size_increment::31>>}] + [{8, 0, frame.stream_id, <<0::1, frame.size_increment::31>>}] end end end From 8a0d48dd20e4bcb04633176889a11e307a6929b3 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 15:37:07 -0300 Subject: [PATCH 37/47] ref: after review adjustments --- .../lib/grpc/transport/http2/frame/ping.ex | 8 +- .../test/grpc/transport/http2/frame_test.exs | 10 +- .../grpc/server/adapters/thousand_island.ex | 134 ++++++++++-------- 3 files changed, 80 insertions(+), 72 deletions(-) diff --git a/grpc_core/lib/grpc/transport/http2/frame/ping.ex 
# Replaces the old `clear?/2` test. The previous rewrite used
# `refute not set?/2` / `assert not set?/2`, a double negation that
# obscures intent; assert/refute directly on `set?/2` instead.
test "set?/2 guard distinguishes set and clear bits" do
  require Frame.Flags

  # bits 0 and 2 set
  flags = 0b00000101

  assert Frame.Flags.set?(flags, 0)
  refute Frame.Flags.set?(flags, 1)
  assert Frame.Flags.set?(flags, 2)
  refute Frame.Flags.set?(flags, 3)
end
Erlang dependencies, better integration - **Built-in pooling**: Native connection pool management - **Lower overhead**: Simpler architecture, fewer layers - **Modern design**: Built with current Elixir best practices @@ -62,18 +61,19 @@ defmodule GRPC.Server.Adapters.ThousandIsland do ### Process Hierarchy - ``` - ThousandIsland Supervisor - └── Handler Process (one per connection) - ├── Connection State (HTTP2.Connection) - │ └── Stream States (HTTP2.StreamState per stream_id) - └── User Handler Tasks (spawned per RPC) - └── BidiStream GenServer (only for bidi streaming) + ```mermaid + graph TD + A[ThousandIsland Supervisor] --> B[Handler Process] + B --> C[Connection State
HTTP2.Connection] + C --> D[Stream States
HTTP2.StreamState per stream_id] + B --> E[User Handler Tasks
spawned per RPC] + E --> F[BidiStream GenServer
only for bidi streaming] ``` ### 1. Unary RPC (request -> response) #### Request Path + 1. **Client sends HTTP/2 frames** → TCP socket 2. **Handler.handle_data/3** receives raw bytes - Buffers until complete frames available @@ -90,6 +90,7 @@ defmodule GRPC.Server.Adapters.ThousandIsland do - Handler runs **synchronously** in Handler process #### Response Path + 1. **Handler returns response** (or calls `GRPC.Server.send_reply/2`) 2. **Dispatcher** sends response headers + data + trailers - Headers: `{":status" => "200", "content-type" => "application/grpc+proto"}` @@ -108,6 +109,7 @@ defmodule GRPC.Server.Adapters.ThousandIsland do ### 2. Client Streaming RPC (stream of requests -> response) #### Request Path + 1. **Client sends multiple DATA frames** (END_STREAM on last) 2. **Handler.handle_data/3** → **Connection.handle_frame/3** - Each DATA frame appends to StreamState.data_buffer @@ -120,6 +122,7 @@ defmodule GRPC.Server.Adapters.ThousandIsland do - Handler **synchronously** consumes stream #### Response Path + Same as Unary (single response at end) **Process Model**: Single Handler process, synchronous handler execution @@ -127,9 +130,11 @@ defmodule GRPC.Server.Adapters.ThousandIsland do ### 3. Server Streaming RPC (request -> stream of responses) #### Request Path + Same as Unary (single request) #### Response Path + 1. **Handler calls `GRPC.Server.send_reply/2` multiple times** 2. 
**This adapter's `send_reply/3`** sends async message: - `send(handler_pid, {:grpc_send_data, stream_id, framed_data})` @@ -141,6 +146,7 @@ defmodule GRPC.Server.Adapters.ThousandIsland do - Handler sends final HEADERS frame with END_STREAM **Process Model**: + - Handler spawns **Task** to run user handler asynchronously - Handler process receives messages from Task and sends frames - Task communicates via messages to Handler process @@ -151,22 +157,25 @@ defmodule GRPC.Server.Adapters.ThousandIsland do #### Process Model - ``` - Handler Process (#PID<0.545.0>) - ├── State: %{accumulated_headers: %{stream_id => headers}, ...} - ├── Receives: HTTP/2 frames from client - ├── Receives: {:grpc_send_data, ...} from User Task - └── Sends: HTTP/2 frames to client - - User Handler Task (#PID<0.XXX.0>) - ├── Runs: MyServer.full_duplex_call(request_enum, stream) - ├── Consumes: request_enum (lazy, pulls from BidiStream) - └── Sends: {:grpc_send_data, ...} messages to Handler - - BidiStream GenServer (#PID<0.YYY.0>) - ├── Queue: Buffered incoming requests - ├── Receives: {:add_message, msg} from Handler (when DATA arrives) - └── Provides: Lazy enumerable to User Task + ```mermaid + graph LR + subgraph HP["Handler Process (#PID<0.545.0>)"] + HS["State: accumulated_headers
stream_id => headers"] + end + + subgraph UT["User Handler Task (#PID<0.XXX.0>)"] + UTR["Runs: MyServer.full_duplex_call
request_enum, stream"] + end + + subgraph BS["BidiStream GenServer (#PID<0.YYY.0>)"] + BSQ["Queue: Buffered incoming requests"] + end + + Client -->|HTTP/2 frames| HP + HP -->|HTTP/2 frames| Client + UT -->|:grpc_send_data| HP + HP -->|:add_message| BS + BS -->|request_enum
lazy pull| UT ``` #### Request Path (Incoming) @@ -281,43 +290,46 @@ defmodule GRPC.Server.Adapters.ThousandIsland do ## Message Flow Diagram (Bidi Streaming) - ``` - Client Handler Process User Task BidiStream - │ │ │ │ - ├─── HEADERS ────────────>│ │ │ - │ ├─ create StreamState │ │ - │ ├─ start BidiStream ───────┼──────────────────────>│ - │ ├─ accumulate headers │ │ - │ ├─ spawn Task ────────────>│ │ - │ │ ├─ request_enum │ - │ │ │ (lazy, blocks) │ - ├─── DATA(req1) ─────────>│ │ │ - │ ├─ decode message │ │ - │ ├─ add_message ────────────┼──────────────────────>│ - │ │ │<──── pull next ───────┤ - │ │ │ (req1) │ - │ │ ├─ process req1 │ - │ │ ├─ send_reply(resp1) │ - │ │<── :grpc_send_data ──────┤ │ - │ ├─ send headers (1st!) │ │ - │<─── HEADERS ────────────┤ │ │ - │<─── DATA(resp1) ────────┤ │ │ - ├─── DATA(req2) ─────────>│ │ │ - │ ├─ add_message ────────────┼──────────────────────>│ - │ │ │<──── pull next ───────┤ - │ │ │ (req2) │ - │ │ ├─ process req2 │ - │ │ ├─ send_reply(resp2) │ - │ │<── :grpc_send_data ──────┤ │ - │<─── DATA(resp2) ────────┤ │ │ - ├─── DATA (END_STREAM) ──>│ │ │ - │ ├─ finish stream ──────────┼──────────────────────>│ - │ │ │<──── nil (done) ──────┤ - │ │ ├─ handler finishes │ - │ │ ├─ send_trailers │ - │ │<── :grpc_send_trailers ──┤ │ - │<─── HEADERS(trailers) ──┤ │ │ - │ (END_STREAM) │ x x + ```mermaid + sequenceDiagram + participant Client + participant Handler as Handler Process + participant Task as User Task + participant Bidi as BidiStream + + Client->>Handler: HEADERS + Handler->>Handler: create StreamState + Handler->>Bidi: start BidiStream + Handler->>Handler: accumulate headers + Handler->>Task: spawn Task + Task->>Task: request_enum (lazy, blocks) + + Client->>Handler: DATA(req1) + Handler->>Handler: decode message + Handler->>Bidi: add_message + Bidi->>Task: pull next (req1) + Task->>Task: process req1 + Task->>Task: send_reply(resp1) + Task->>Handler: :grpc_send_data + Handler->>Handler: send headers (1st!) 
+ Handler->>Client: HEADERS + Handler->>Client: DATA(resp1) + + Client->>Handler: DATA(req2) + Handler->>Bidi: add_message + Bidi->>Task: pull next (req2) + Task->>Task: process req2 + Task->>Task: send_reply(resp2) + Task->>Handler: :grpc_send_data + Handler->>Client: DATA(resp2) + + Client->>Handler: DATA (END_STREAM) + Handler->>Bidi: finish stream + Bidi->>Task: nil (done) + Task->>Task: handler finishes + Task->>Task: send_trailers + Task->>Handler: :grpc_send_trailers + Handler->>Client: HEADERS(trailers)
END_STREAM ``` ## Key Design Patterns From 0776ed40bd03d78710af7563a5432bd957b2d848 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 18:31:09 -0300 Subject: [PATCH 38/47] ref: more defensive process model and error handling This commit reduces a bit the general performance but increase correctness --- grpc_server/lib/grpc/server.ex | 13 ++ .../lib/grpc/server/adapters/cowboy.ex | 3 - .../grpc/server/adapters/thousand_island.ex | 33 +---- .../adapters/thousand_island/handler.ex | 81 +++++++++- .../lib/grpc/server/http2/dispatcher.ex | 140 ++++++++---------- grpc_server/lib/grpc/server/supervisor.ex | 13 +- .../server/adapters/thousand_island_test.exs | 22 +-- .../test/grpc/server/supervisor_test.exs | 20 ++- 8 files changed, 188 insertions(+), 137 deletions(-) diff --git a/grpc_server/lib/grpc/server.ex b/grpc_server/lib/grpc/server.ex index 57b2370d2..dc5e9cac7 100644 --- a/grpc_server/lib/grpc/server.ex +++ b/grpc_server/lib/grpc/server.ex @@ -404,6 +404,13 @@ defmodule GRPC.Server do @spec start(module() | [module()], non_neg_integer(), Keyword.t()) :: {atom(), any(), non_neg_integer()} | {:error, any()} def start(servers, port, opts \\ []) do + # Ensure Task.Supervisor is started for streaming RPCs + # (When using Supervisor, it's started there; when using start/3 directly, we need it here) + case Task.Supervisor.start_link(name: GRPC.Server.StreamTaskSupervisor) do + {:ok, _pid} -> :ok + {:error, {:already_started, _pid}} -> :ok + end + adapter = Keyword.get(opts, :adapter) || GRPC.Server.Adapters.Cowboy servers = GRPC.Server.servers_to_map(servers) adapter.start(nil, servers, port, opts) @@ -413,6 +420,12 @@ defmodule GRPC.Server do @spec start_endpoint(atom(), non_neg_integer(), Keyword.t()) :: {atom(), any(), non_neg_integer()} def start_endpoint(endpoint, port, opts \\ []) do + # Ensure Task.Supervisor is started for streaming RPCs + case Task.Supervisor.start_link(name: GRPC.Server.StreamTaskSupervisor) do + {:ok, _pid} -> :ok + 
{:error, {:already_started, _pid}} -> :ok + end + opts = Keyword.validate!(opts, adapter: GRPC.Server.Adapters.Cowboy) adapter = opts[:adapter] servers = endpoint.__meta__(:servers) diff --git a/grpc_server/lib/grpc/server/adapters/cowboy.ex b/grpc_server/lib/grpc/server/adapters/cowboy.ex index 42ffe56a2..95ee0a2f3 100644 --- a/grpc_server/lib/grpc/server/adapters/cowboy.ex +++ b/grpc_server/lib/grpc/server/adapters/cowboy.ex @@ -89,9 +89,6 @@ defmodule GRPC.Server.Adapters.Cowboy do @spec start_link(atom(), atom(), %{String.t() => [module()]}, any()) :: {:ok, pid()} | {:error, any()} def start_link(scheme, endpoint, servers, {m, f, [ref | _] = a}) do - # Initialize ETS cache for codecs/compressors lookup - GRPC.Server.Cache.init() - case apply(m, f, a) do {:ok, pid} -> Logger.info(running_info(scheme, endpoint, servers, ref)) diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island.ex b/grpc_server/lib/grpc/server/adapters/thousand_island.ex index e6f7e5035..7a312a8d8 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island.ex @@ -377,16 +377,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland do """ @impl true def start(endpoint, servers, port, opts) do - GRPC.Server.Cache.init() - - case Process.whereis(GRPC.Server.StreamTaskSupervisor) do - nil -> - {:ok, _} = Task.Supervisor.start_link(name: GRPC.Server.StreamTaskSupervisor) - - _pid -> - :ok - end - server_opts = build_server_opts(endpoint, servers, port, opts) case ThousandIsland.start_link(server_opts) do @@ -423,8 +413,6 @@ defmodule GRPC.Server.Adapters.ThousandIsland do def child_spec(endpoint, servers, port, opts) do server_opts = build_server_opts(endpoint, servers, port, opts) - GRPC.Server.Cache.init() - scheme = if cred_opts(opts), do: :https, else: :http Logger.info( @@ -433,20 +421,9 @@ defmodule GRPC.Server.Adapters.ThousandIsland do server_name = servers_name(endpoint, servers) - children = [ - {Task.Supervisor, 
name: GRPC.Server.StreamTaskSupervisor}, - %{ - id: :thousand_island, - start: {ThousandIsland, :start_link, [server_opts]}, - type: :supervisor, - restart: :permanent, - shutdown: :infinity - } - ] - %{ id: server_name, - start: {Supervisor, :start_link, [children, [strategy: :rest_for_one]]}, + start: {ThousandIsland, :start_link, [server_opts]}, type: :supervisor, restart: :permanent, shutdown: :infinity @@ -490,10 +467,10 @@ defmodule GRPC.Server.Adapters.ThousandIsland do :ok end - def set_resp_trailers(%{handler_pid: _pid, stream_id: stream_id}, trailers) do - # Store in process dictionary (runs in handler context during dispatch) - current_custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) - Process.put({:grpc_custom_trailers, stream_id}, Map.merge(current_custom_trailers, trailers)) + def set_resp_trailers(%{handler_pid: pid, stream_id: stream_id}, trailers) do + # Send message to accumulate trailers in handler state + # They will be merged with final trailers when stream completes + send(pid, {:grpc_accumulate_trailers, stream_id, trailers}) :ok end diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex index 1aa552ae1..c9ac19a18 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex @@ -51,7 +51,9 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do connection: nil, buffer: <<>>, preface_received: false, - accumulated_headers: %{} + accumulated_headers: %{}, + accumulated_trailers: %{}, + stream_tasks: %{} } {:continue, new_state} @@ -87,6 +89,19 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do {:noreply, {socket, %{state | accumulated_headers: new_accumulated}}} end + def handle_info({:grpc_accumulate_trailers, stream_id, trailers}, {socket, state}) do + current_trailers = Map.get(state.accumulated_trailers, stream_id, %{}) + 
updated_trailers = Map.merge(current_trailers, trailers) + new_accumulated = Map.put(state.accumulated_trailers, stream_id, updated_trailers) + {:noreply, {socket, %{state | accumulated_trailers: new_accumulated}}} + end + + def handle_info({:register_stream_task, stream_id, task_pid, task_ref}, {socket, state}) do + Logger.debug("[Handler] Registering stream task for stream #{stream_id}, pid=#{inspect(task_pid)}") + new_stream_tasks = Map.put(state.stream_tasks, stream_id, {task_pid, task_ref}) + {:noreply, {socket, %{state | stream_tasks: new_stream_tasks}}} + end + def handle_info({:grpc_send_headers, stream_id, headers}, {socket, state}) do Logger.debug("[Streaming] Sending headers for stream #{stream_id}") Connection.send_headers(socket, stream_id, headers, state.connection) @@ -124,12 +139,22 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do state end + # Merge accumulated custom trailers with final trailers + accumulated_trailers = Map.get(state.accumulated_trailers, stream_id, %{}) + final_trailers = Map.merge(trailers, accumulated_trailers) + # Send trailers (headers with END_STREAM) for streaming # This will also remove the stream from the connection updated_connection = - Connection.send_trailers(socket, stream_id, trailers, new_state.connection) + Connection.send_trailers(socket, stream_id, final_trailers, new_state.connection) + + new_state = %{ + new_state + | connection: updated_connection, + accumulated_trailers: Map.delete(new_state.accumulated_trailers, stream_id), + stream_tasks: Map.delete(new_state.stream_tasks, stream_id) + } - new_state = %{new_state | connection: updated_connection} {:noreply, {socket, new_state}} end @@ -148,6 +173,56 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do {:noreply, {socket, %{state | connection: updated_connection}}} end + def handle_info({:DOWN, _ref, :process, pid, reason}, {socket, state}) do + # Task crashed - find which stream it belongs to and send error trailers + case 
Enum.find(state.stream_tasks, fn {_stream_id, {task_pid, _ref}} -> task_pid == pid end) do + {stream_id, _} -> + Logger.error("[Handler] Stream #{stream_id} task crashed: #{inspect(reason)}") + + # Send error trailers to client + error_trailers = %{ + "grpc-status" => "13", + # INTERNAL + "grpc-message" => "Stream handler crashed: #{inspect(reason)}" + } + + # Check if we have unsent accumulated headers (stream never sent data) + accumulated = Map.get(state.accumulated_headers, stream_id, %{}) + + new_state = + if map_size(accumulated) > 0 do + updated_conn = + Connection.send_headers(socket, stream_id, accumulated, state.connection) + + %{ + state + | accumulated_headers: Map.delete(state.accumulated_headers, stream_id), + connection: updated_conn + } + else + state + end + + # Send error trailers + updated_connection = + Connection.send_trailers(socket, stream_id, error_trailers, new_state.connection) + + new_state = %{ + new_state + | connection: updated_connection, + accumulated_trailers: Map.delete(new_state.accumulated_trailers, stream_id), + stream_tasks: Map.delete(new_state.stream_tasks, stream_id) + } + + {:noreply, {socket, new_state}} + + nil -> + # Task not found in our tracking - ignore silently + # This can happen for tasks spawned outside our control + {:noreply, {socket, state}} + end + end + def handle_info(_msg, {socket, state}) do {:noreply, {socket, state}} end diff --git a/grpc_server/lib/grpc/server/http2/dispatcher.ex b/grpc_server/lib/grpc/server/http2/dispatcher.ex index 7a8695c94..aa40dcf3a 100644 --- a/grpc_server/lib/grpc/server/http2/dispatcher.ex +++ b/grpc_server/lib/grpc/server/http2/dispatcher.ex @@ -276,14 +276,9 @@ defmodule GRPC.Server.HTTP2.Dispatcher do GRPC.Server.send_reply(stream, response) end - # Get custom trailers from process dictionary (set by handler via set_trailers) - stream_id = stream.payload.stream_id - custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) - - # Merge with mandatory grpc-status 
- trailers = Map.merge(%{"grpc-status" => "0"}, custom_trailers) - # Send trailers at the end with END_STREAM + # Custom trailers already accumulated in Handler via set_resp_trailers + trailers = %{"grpc-status" => "0"} GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) # Return special marker for async handling (like server_streaming) @@ -291,19 +286,16 @@ defmodule GRPC.Server.HTTP2.Dispatcher do rescue e in GRPC.RPCError -> # Send error as trailers (headers already accumulated, will be sent with trailers) - stream_id = stream.payload.stream_id + _stream_id = stream.payload.stream_id error_trailers = %{ "grpc-status" => "#{e.status}", "grpc-message" => e.message || "" } - # Get custom trailers from process dictionary - custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) - trailers = Map.merge(error_trailers, custom_trailers) - # Send trailers with error (will send accumulated headers first if not sent yet) - GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + # Custom trailers already accumulated in Handler via set_resp_trailers + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers) # Return streaming_done (error already sent) {:ok, :streaming_done} @@ -317,7 +309,6 @@ defmodule GRPC.Server.HTTP2.Dispatcher do "grpc-message" => Exception.message(e) } - # Send trailers with error GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers) # Return streaming_done (error already sent) @@ -375,31 +366,22 @@ defmodule GRPC.Server.HTTP2.Dispatcher do # Send response using async message (this will send accumulated headers first) GRPC.Server.send_reply(stream, response) - # Get custom trailers from process dictionary (set by handler via set_trailers) - stream_id = stream.payload.stream_id - custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) - - # Merge with mandatory grpc-status - trailers = Map.merge(%{"grpc-status" => "0"}, 
custom_trailers) - # Send trailers at the end with END_STREAM + # Custom trailers already accumulated in Handler via set_resp_trailers + trailers = %{"grpc-status" => "0"} GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) # Return special marker for async handling (like server_streaming) {:ok, :streaming_done} rescue e in GRPC.RPCError -> - # Send error as trailers - stream_id = stream.payload.stream_id - + # Send error as trailers (headers already accumulated, will be sent with trailers) error_trailers = %{ "grpc-status" => "#{e.status}", "grpc-message" => e.message || "" } - custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) - trailers = Map.merge(error_trailers, custom_trailers) - GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers) {:ok, :streaming_done} e -> @@ -461,14 +443,9 @@ defmodule GRPC.Server.HTTP2.Dispatcher do # Handler calls GRPC.Server.send_reply for each response apply(server, func_name, [request, stream]) - # Get custom trailers from process dictionary (set by handler via set_trailers) - stream_id = stream.payload.stream_id - custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) - - # Merge with mandatory grpc-status - trailers = Map.merge(%{"grpc-status" => "0"}, custom_trailers) - # Send trailers at the end with END_STREAM + # Custom trailers already accumulated in Handler via set_resp_trailers + trailers = %{"grpc-status" => "0"} GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) # Return special marker for streaming (not data to send) @@ -476,16 +453,12 @@ defmodule GRPC.Server.HTTP2.Dispatcher do rescue e in GRPC.RPCError -> # Send error as trailers - stream_id = stream.payload.stream_id - error_trailers = %{ "grpc-status" => "#{e.status}", "grpc-message" => e.message || "" } - custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) - 
trailers = Map.merge(error_trailers, custom_trailers) - GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, error_trailers) {:ok, :streaming_done} e -> @@ -604,57 +577,66 @@ defmodule GRPC.Server.HTTP2.Dispatcher do | payload: %{stream.payload | stream_state: updated_stream_state} } - # CRITICAL: Run handler in a separate task to not block the connection handler + # Get handler_pid to send task monitoring info + handler_pid = stream.payload.handler_pid + + # CRITICAL: Run handler in a separate supervised task to not block the connection handler # The connection handler MUST continue processing incoming DATA frames # and feed them to the BidiStream while the handler is consuming messages - Task.start(fn -> - try do - # Use GRPC.Server.call to properly handle the request - # This ensures the reading_stream is created correctly via adapter.reading_stream - result = GRPC.Server.call(server, updated_stream, rpc, func_name) - Logger.info("[call_bidi_streaming] Handler returned: #{inspect(result)}") - - case result do - {:ok, stream} -> - # Get custom trailers from process dictionary - custom_trailers = Process.get({:grpc_custom_trailers, stream_id}, %{}) - trailers = Map.merge(%{"grpc-status" => "0"}, custom_trailers) - GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) - - {:error, error} -> - Logger.error("[call_bidi_streaming] Handler error result: #{inspect(error)}") + # Use async_nolink to avoid taking down the supervisor if task crashes + task = + Task.Supervisor.async_nolink(GRPC.Server.StreamTaskSupervisor, fn -> + try do + # Use GRPC.Server.call to properly handle the request + # This ensures the reading_stream is created correctly via adapter.reading_stream + result = GRPC.Server.call(server, updated_stream, rpc, func_name) + Logger.info("[call_bidi_streaming] Handler returned: #{inspect(result)}") + + case result do + {:ok, stream} -> + # Send 
trailers with success status + # Custom trailers already accumulated in Handler via set_resp_trailers + trailers = %{"grpc-status" => "0"} + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + + {:error, error} -> + Logger.error("[call_bidi_streaming] Handler error result: #{inspect(error)}") + + trailers = %{ + "grpc-status" => "#{error.status || 2}", + "grpc-message" => error.message || "Handler error" + } + + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + end + rescue + e in GRPC.RPCError -> + Logger.error("[call_bidi_streaming] Handler RPC Error: #{inspect(e)}") trailers = %{ - "grpc-status" => "#{error.status || 2}", - "grpc-message" => error.message || "Handler error" + "grpc-status" => "#{e.status}", + "grpc-message" => e.message || "" } GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) - end - rescue - e in GRPC.RPCError -> - Logger.error("[call_bidi_streaming] Handler RPC Error: #{inspect(e)}") - - trailers = %{ - "grpc-status" => "#{e.status}", - "grpc-message" => e.message || "" - } - GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + e -> + Logger.error( + "[call_bidi_streaming] Handler error: #{Exception.message(e)}\n#{Exception.format_stacktrace(__STACKTRACE__)}" + ) - e -> - Logger.error( - "[call_bidi_streaming] Handler error: #{Exception.message(e)}\n#{Exception.format_stacktrace(__STACKTRACE__)}" - ) + trailers = %{ + "grpc-status" => "2", + "grpc-message" => Exception.message(e) + } - trailers = %{ - "grpc-status" => "2", - "grpc-message" => Exception.message(e) - } + GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) + end + end) - GRPC.Server.Adapters.ThousandIsland.send_trailers(stream.payload, trailers) - end - end) + # Register task in handler for monitoring + # Handler will monitor the task and send error trailers if it crashes unexpectedly + send(handler_pid, {:register_stream_task, stream_id, task.pid, 
task.ref}) # Return special marker for streaming (not data to send) {:ok, :streaming_done} diff --git a/grpc_server/lib/grpc/server/supervisor.ex b/grpc_server/lib/grpc/server/supervisor.ex index 946d92cc4..59469074d 100644 --- a/grpc_server/lib/grpc/server/supervisor.ex +++ b/grpc_server/lib/grpc/server/supervisor.ex @@ -103,12 +103,15 @@ defmodule GRPC.Server.Supervisor do servers end + GRPC.Server.Cache.init() + children = - if opts[:start_server] do - [child_spec(endpoint_or_servers, opts[:port], opts)] - else - [] - end + [{Task.Supervisor, name: GRPC.Server.StreamTaskSupervisor}] ++ + if opts[:start_server] do + [child_spec(endpoint_or_servers, opts[:port], opts)] + else + [] + end Supervisor.init(children, strategy: :one_for_one) end diff --git a/grpc_server/test/grpc/server/adapters/thousand_island_test.exs b/grpc_server/test/grpc/server/adapters/thousand_island_test.exs index ce331db4f..82829afa9 100644 --- a/grpc_server/test/grpc/server/adapters/thousand_island_test.exs +++ b/grpc_server/test/grpc/server/adapters/thousand_island_test.exs @@ -7,33 +7,21 @@ defmodule GRPC.Server.Adapters.ThousandIslandTest do test "returns valid child spec" do spec = Adapter.child_spec(:test_endpoint, [], 50051, []) - # The spec should be a supervisor child spec that wraps ThousandIsland + Task.Supervisor + # After moving Task.Supervisor to GRPC.Server.Supervisor, + # the spec now returns ThousandIsland's child spec directly assert is_map(spec) assert spec.type == :supervisor - assert {Supervisor, :start_link, [children, _opts]} = spec.start - assert is_list(children) - - # Should have Task.Supervisor and ThousandIsland children - assert length(children) == 2 - [task_sup, ti_child] = children - - # First child is Task.Supervisor - assert {Task.Supervisor, [name: GRPC.Server.StreamTaskSupervisor]} = task_sup - - # Second child is ThousandIsland - assert ti_child.id == :thousand_island - assert {ThousandIsland, :start_link, [args]} = ti_child.start + assert {ThousandIsland, 
:start_link, [args]} = spec.start assert is_list(args) assert args[:port] == 50051 + assert args[:handler_module] == GRPC.Server.Adapters.ThousandIsland.Handler end test "includes adapter options in child spec" do opts = [num_acceptors: 5, num_connections: 50] spec = Adapter.child_spec(:test_endpoint, [], 50051, opts) - {Supervisor, :start_link, [children, _opts]} = spec.start - [_task_sup, ti_child] = children - {ThousandIsland, :start_link, [args]} = ti_child.start + {ThousandIsland, :start_link, [args]} = spec.start assert args[:num_acceptors] == 5 assert args[:num_connections] == 50 diff --git a/grpc_server/test/grpc/server/supervisor_test.exs b/grpc_server/test/grpc/server/supervisor_test.exs index f6f5bf733..bc63e7966 100644 --- a/grpc_server/test/grpc/server/supervisor_test.exs +++ b/grpc_server/test/grpc/server/supervisor_test.exs @@ -9,8 +9,18 @@ defmodule GRPC.Server.SupervisorTest do describe "init/1" do test "does not start children if opts sets false" do - assert {:ok, {%{strategy: :one_for_one}, []}} = - Supervisor.init(endpoint: MockEndpoint, port: 1234, start_server: false) + assert { + :ok, + {%{strategy: :one_for_one}, + [ + %{ + id: GRPC.Server.StreamTaskSupervisor, + start: + {Task.Supervisor, :start_link, [[name: GRPC.Server.StreamTaskSupervisor]]}, + type: :supervisor + } + ]} + } = Supervisor.init(endpoint: MockEndpoint, port: 1234, start_server: false) end test "fails if a tuple is passed" do @@ -35,6 +45,12 @@ defmodule GRPC.Server.SupervisorTest do { %{strategy: :one_for_one, auto_shutdown: :never, intensity: 3, period: 5}, [ + %{ + id: GRPC.Server.StreamTaskSupervisor, + start: + {Task.Supervisor, :start_link, [[name: GRPC.Server.StreamTaskSupervisor]]}, + type: :supervisor + }, %{ id: {:ranch_embedded_sup, ^endpoint_str}, start: From 074355099140c5fd0736acf261453c69938a191a Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 18:43:37 -0300 Subject: [PATCH 39/47] ref: use merge instead of ++ --- 
grpc_server/lib/grpc/server/adapters/thousand_island.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island.ex b/grpc_server/lib/grpc/server/adapters/thousand_island.ex index 7a312a8d8..3457d3c40 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island.ex @@ -608,7 +608,7 @@ defmodule GRPC.Server.Adapters.ThousandIsland do defp maybe_add_ssl(transport_opts, nil), do: transport_opts defp maybe_add_ssl(transport_opts, cred_opts) do - transport_opts ++ cred_opts.ssl + Keyword.merge(transport_opts, cred_opts.ssl) end defp transport_module(opts) do From 8e3e5f2d03f3efd415c53a03a4eb052d3f435c07 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 18:53:05 -0300 Subject: [PATCH 40/47] Update grpc_server/lib/grpc/server/adapters/thousand_island.ex Co-authored-by: Paulo Valente <16843419+polvalente@users.noreply.github.com> --- grpc_server/lib/grpc/server/adapters/thousand_island.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island.ex b/grpc_server/lib/grpc/server/adapters/thousand_island.ex index 7a312a8d8..388037cda 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island.ex @@ -620,7 +620,7 @@ defmodule GRPC.Server.Adapters.ThousandIsland do end defp cred_opts(opts) do - Kernel.get_in(opts, [:cred]) + opts[:cred] end defp servers_name(nil, servers) do From 7e0c672c16b2a2935092b4c6034b2d97a24db456 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Mon, 15 Dec 2025 19:04:40 -0300 Subject: [PATCH 41/47] Update grpc_server/lib/grpc/server/adapters/thousand_island.ex Co-authored-by: Paulo Valente <16843419+polvalente@users.noreply.github.com> --- grpc_server/lib/grpc/server/adapters/thousand_island.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/grpc_server/lib/grpc/server/adapters/thousand_island.ex b/grpc_server/lib/grpc/server/adapters/thousand_island.ex index 388037cda..12dfd6dd1 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island.ex @@ -624,7 +624,7 @@ defmodule GRPC.Server.Adapters.ThousandIsland do end defp servers_name(nil, servers) do - servers |> Map.values() |> Enum.map(fn s -> inspect(s) end) |> Enum.join(",") + Enum.map_join(servers, ",", fn _k, s -> inspect(s) end) end defp servers_name(endpoint, _) do From 1fb869b27bbacf27d78dacf08c99d772fee6ce33 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Tue, 16 Dec 2025 11:28:39 -0300 Subject: [PATCH 42/47] tests: added map_errors integration tests --- .../test/grpc/integration/stream_test.exs | 280 ++++++++++++++++++ grpc_server/lib/grpc/stream.ex | 3 +- 2 files changed, 282 insertions(+), 1 deletion(-) diff --git a/grpc_client/test/grpc/integration/stream_test.exs b/grpc_client/test/grpc/integration/stream_test.exs index 9ea4b7162..43ed355ee 100644 --- a/grpc_client/test/grpc/integration/stream_test.exs +++ b/grpc_client/test/grpc/integration/stream_test.exs @@ -33,4 +33,284 @@ defmodule GRPC.StreamTest do end) end end + + describe "map_error/2" do + defmodule MapErrorService do + use GRPC.Server, service: Routeguide.RouteGuide.Service + + def get_feature(input, materializer) do + GRPC.Stream.unary(input, materializer: materializer) + |> GRPC.Stream.map(fn point -> + # Trigger error when latitude is 0 + if point.latitude == 0 do + raise "Boom! 
Invalid latitude" + end + + %Routeguide.Feature{location: point, name: "#{point.latitude},#{point.longitude}"} + end) + |> GRPC.Stream.map_error(fn error -> + case error do + {:error, {:exception, %{message: msg}}} -> + {:error, GRPC.RPCError.exception(status: :invalid_argument, message: "Error: #{msg}")} + + other -> + # Not an error, return as-is to continue the flow + other + end + end) + |> GRPC.Stream.run() + end + end + + defmodule DirectRPCErrorService do + use GRPC.Server, service: Routeguide.RouteGuide.Service + + def get_feature(input, materializer) do + GRPC.Stream.unary(input, materializer: materializer) + |> GRPC.Stream.map(fn point -> + # Trigger error when latitude is negative + if point.latitude < 0 do + raise "Negative latitude not allowed" + end + + %Routeguide.Feature{location: point, name: "#{point.latitude},#{point.longitude}"} + end) + |> GRPC.Stream.map_error(fn error -> + case error do + {:error, {:exception, %{message: msg}}} -> + # Return RPCError directly without {:error, ...} wrapper + GRPC.RPCError.exception(status: :out_of_range, message: "Direct error: #{msg}") + + other -> + # Not an error, return as-is to continue the flow + other + end + end) + |> GRPC.Stream.run() + end + end + + defmodule ExplicitValidationService do + use GRPC.Server, service: Routeguide.RouteGuide.Service + + def get_feature(input, materializer) do + GRPC.Stream.unary(input, materializer: materializer) + |> GRPC.Stream.map(fn point -> + # Trigger different error types based on coordinates + cond do + point.latitude == 999 -> + raise RuntimeError, "Runtime error occurred" + + point.latitude == 888 -> + raise ArgumentError, "Argument is invalid" + + point.latitude == 777 -> + raise "Simple string error" + + true -> + %Routeguide.Feature{location: point, name: "valid"} + end + end) + |> GRPC.Stream.map_error(fn error -> + # Explicitly validate the error structure + case error do + {:error, {:exception, exception_data}} when is_map(exception_data) -> + # Validate 
that we have the expected exception structure + message = Map.get(exception_data, :message) + kind = Map.get(exception_data, :kind, :error) + + cond do + is_binary(message) and message =~ "Runtime error" -> + {:error, GRPC.RPCError.exception(status: :internal, message: "Validated: RuntimeError - #{message}")} + + is_binary(message) and message =~ "Argument is invalid" -> + {:error, GRPC.RPCError.exception(status: :invalid_argument, message: "Validated: ArgumentError - #{message}")} + + is_binary(message) -> + {:error, GRPC.RPCError.exception(status: :unknown, message: "Validated: #{kind} - #{message}")} + + true -> + {:error, GRPC.RPCError.exception(status: :unknown, message: "Validated but no message found")} + end + + other -> + # Not an exception error, pass through + other + end + end) + |> GRPC.Stream.run() + end + end + + defmodule MultipleErrorsService do + use GRPC.Server, service: Routeguide.RouteGuide.Service + + def get_feature(input, materializer) do + GRPC.Stream.unary(input, materializer: materializer) + |> GRPC.Stream.map(fn point -> + cond do + point.latitude == 0 -> + raise "Invalid latitude: cannot be zero" + + point.longitude == 0 -> + raise "Invalid longitude: cannot be zero" + + point.latitude < 0 -> + raise ArgumentError, "Latitude must be positive" + + true -> + %Routeguide.Feature{location: point, name: "valid"} + end + end) + |> GRPC.Stream.map_error(fn error -> + case error do + {:error, {:exception, %{message: msg}}} when is_binary(msg) -> + cond do + msg =~ "latitude" -> + {:error, GRPC.RPCError.exception(status: :invalid_argument, message: "Latitude error: #{msg}")} + + msg =~ "longitude" -> + {:error, GRPC.RPCError.exception(status: :invalid_argument, message: "Longitude error: #{msg}")} + + true -> + {:error, GRPC.RPCError.exception(status: :unknown, message: "Unknown error: #{msg}")} + end + + other -> + # Not an error we handle, return as-is to continue the flow + other + end + end) + |> GRPC.Stream.run() + end + end + + @tag 
:map_error + test "handles errors with map_error and sends RPCError to client" do + run_server([MapErrorService], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}", adapter_opts: [retry_timeout: 10]) + + # Test with invalid latitude (0) - should trigger error + invalid_point = %Routeguide.Point{latitude: 0, longitude: -746_188_906} + + result = Routeguide.RouteGuide.Stub.get_feature(channel, invalid_point, return_headers: true) + + # Should receive error response with custom message + assert {:error, error} = result + assert %GRPC.RPCError{} = error + # Status is returned as integer (3 = INVALID_ARGUMENT) + assert error.status == 3 + assert error.message =~ "Error: Boom! Invalid latitude" + end) + end + + @tag :map_error + test "handles successful requests without triggering map_error" do + run_server([MapErrorService], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}", adapter_opts: [retry_timeout: 10]) + + # Test with valid latitude (non-zero) - should succeed + valid_point = %Routeguide.Point{latitude: 409_146_138, longitude: -746_188_906} + + result = Routeguide.RouteGuide.Stub.get_feature(channel, valid_point, return_headers: true) + + assert {:ok, response, _metadata} = result + assert response.location == valid_point + assert response.name == "409146138,-746188906" + end) + end + + @tag :map_error + test "handles RPCError returned directly without {:error, ...} wrapper" do + run_server([DirectRPCErrorService], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}", adapter_opts: [retry_timeout: 10]) + + # Test with negative latitude - should trigger error + negative_point = %Routeguide.Point{latitude: -50, longitude: 100} + + result = Routeguide.RouteGuide.Stub.get_feature(channel, negative_point, return_headers: true) + + # Should receive error response with custom message + assert {:error, error} = result + assert %GRPC.RPCError{} = error + # Status is returned as integer (11 = OUT_OF_RANGE) + assert 
error.status == 11 + assert error.message =~ "Direct error: Negative latitude not allowed" + end) + end + + @tag :map_error + test "handles successful request when using direct RPCError service" do + run_server([DirectRPCErrorService], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}", adapter_opts: [retry_timeout: 10]) + + # Test with positive latitude - should succeed + valid_point = %Routeguide.Point{latitude: 50, longitude: 100} + + result = Routeguide.RouteGuide.Stub.get_feature(channel, valid_point, return_headers: true) + + assert {:ok, response, _metadata} = result + assert response.location == valid_point + assert response.name == "50,100" + end) + end + + @tag :map_error + test "handles different error types with conditional map_error" do + run_server([MultipleErrorsService], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}", adapter_opts: [retry_timeout: 10]) + + # Test latitude error + lat_error_point = %Routeguide.Point{latitude: 0, longitude: 100} + assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, lat_error_point) + assert error.status == 3 # INVALID_ARGUMENT + assert error.message =~ "Latitude error" + + # Test longitude error + long_error_point = %Routeguide.Point{latitude: 100, longitude: 0} + assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, long_error_point) + assert error.status == 3 # INVALID_ARGUMENT + assert error.message =~ "Longitude error" + + # Test ArgumentError (negative latitude) - falls into "Unknown error" branch + arg_error_point = %Routeguide.Point{latitude: -100, longitude: 100} + assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, arg_error_point) + assert error.status == 2 # UNKNOWN (because message contains "Latitude must be positive") + assert error.message =~ "Latitude must be positive" + end) + end + + @tag :map_error + test "explicitly validates exception structure in map_error" do + 
run_server([ExplicitValidationService], fn port -> + {:ok, channel} = GRPC.Stub.connect("localhost:#{port}", adapter_opts: [retry_timeout: 10]) + + # Test RuntimeError - should validate and transform to INTERNAL + runtime_error_point = %Routeguide.Point{latitude: 999, longitude: 100} + assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, runtime_error_point) + assert error.status == 13 # INTERNAL + assert error.message =~ "Validated: RuntimeError" + assert error.message =~ "Runtime error occurred" + + # Test ArgumentError - should validate and transform to INVALID_ARGUMENT + arg_error_point = %Routeguide.Point{latitude: 888, longitude: 100} + assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, arg_error_point) + assert error.status == 3 # INVALID_ARGUMENT + assert error.message =~ "Validated: ArgumentError" + assert error.message =~ "Argument is invalid" + + # Test simple string error - should validate and transform to UNKNOWN + string_error_point = %Routeguide.Point{latitude: 777, longitude: 100} + assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, string_error_point) + assert error.status == 2 # UNKNOWN + assert error.message =~ "Validated:" + assert error.message =~ "Simple string error" + + # Test successful request - should not trigger error handling + valid_point = %Routeguide.Point{latitude: 100, longitude: 100} + assert {:ok, response} = Routeguide.RouteGuide.Stub.get_feature(channel, valid_point) + assert response.name == "valid" + end) + end + end end diff --git a/grpc_server/lib/grpc/stream.ex b/grpc_server/lib/grpc/stream.ex index 9889b417a..3d256222b 100644 --- a/grpc_server/lib/grpc/stream.ex +++ b/grpc_server/lib/grpc/stream.ex @@ -544,7 +544,8 @@ defmodule GRPC.Stream do if not dry_run? 
do # RPCError should be raised, not sent as reply case msg do - %GRPC.RPCError{} -> raise msg + %GRPC.RPCError{} = rpc_error -> raise rpc_error + {:error, %GRPC.RPCError{} = rpc_error} -> raise rpc_error _ -> GRPC.Server.send_reply(from, msg) end end From 892b550cef6e7763251c2ae3a3937742ad5057ca Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Tue, 16 Dec 2025 12:29:08 -0300 Subject: [PATCH 43/47] format --- .../test/grpc/integration/stream_test.exs | 84 ++++++++++++++----- 1 file changed, 63 insertions(+), 21 deletions(-) diff --git a/grpc_client/test/grpc/integration/stream_test.exs b/grpc_client/test/grpc/integration/stream_test.exs index 43ed355ee..5cecb9587 100644 --- a/grpc_client/test/grpc/integration/stream_test.exs +++ b/grpc_client/test/grpc/integration/stream_test.exs @@ -51,7 +51,8 @@ defmodule GRPC.StreamTest do |> GRPC.Stream.map_error(fn error -> case error do {:error, {:exception, %{message: msg}}} -> - {:error, GRPC.RPCError.exception(status: :invalid_argument, message: "Error: #{msg}")} + {:error, + GRPC.RPCError.exception(status: :invalid_argument, message: "Error: #{msg}")} other -> # Not an error, return as-is to continue the flow @@ -118,19 +119,35 @@ defmodule GRPC.StreamTest do # Validate that we have the expected exception structure message = Map.get(exception_data, :message) kind = Map.get(exception_data, :kind, :error) - + cond do is_binary(message) and message =~ "Runtime error" -> - {:error, GRPC.RPCError.exception(status: :internal, message: "Validated: RuntimeError - #{message}")} + {:error, + GRPC.RPCError.exception( + status: :internal, + message: "Validated: RuntimeError - #{message}" + )} is_binary(message) and message =~ "Argument is invalid" -> - {:error, GRPC.RPCError.exception(status: :invalid_argument, message: "Validated: ArgumentError - #{message}")} + {:error, + GRPC.RPCError.exception( + status: :invalid_argument, + message: "Validated: ArgumentError - #{message}" + )} is_binary(message) -> - {:error, 
GRPC.RPCError.exception(status: :unknown, message: "Validated: #{kind} - #{message}")} + {:error, + GRPC.RPCError.exception( + status: :unknown, + message: "Validated: #{kind} - #{message}" + )} true -> - {:error, GRPC.RPCError.exception(status: :unknown, message: "Validated but no message found")} + {:error, + GRPC.RPCError.exception( + status: :unknown, + message: "Validated but no message found" + )} end other -> @@ -167,13 +184,22 @@ defmodule GRPC.StreamTest do {:error, {:exception, %{message: msg}}} when is_binary(msg) -> cond do msg =~ "latitude" -> - {:error, GRPC.RPCError.exception(status: :invalid_argument, message: "Latitude error: #{msg}")} + {:error, + GRPC.RPCError.exception( + status: :invalid_argument, + message: "Latitude error: #{msg}" + )} msg =~ "longitude" -> - {:error, GRPC.RPCError.exception(status: :invalid_argument, message: "Longitude error: #{msg}")} + {:error, + GRPC.RPCError.exception( + status: :invalid_argument, + message: "Longitude error: #{msg}" + )} true -> - {:error, GRPC.RPCError.exception(status: :unknown, message: "Unknown error: #{msg}")} + {:error, + GRPC.RPCError.exception(status: :unknown, message: "Unknown error: #{msg}")} end other -> @@ -193,7 +219,8 @@ defmodule GRPC.StreamTest do # Test with invalid latitude (0) - should trigger error invalid_point = %Routeguide.Point{latitude: 0, longitude: -746_188_906} - result = Routeguide.RouteGuide.Stub.get_feature(channel, invalid_point, return_headers: true) + result = + Routeguide.RouteGuide.Stub.get_feature(channel, invalid_point, return_headers: true) # Should receive error response with custom message assert {:error, error} = result @@ -212,7 +239,8 @@ defmodule GRPC.StreamTest do # Test with valid latitude (non-zero) - should succeed valid_point = %Routeguide.Point{latitude: 409_146_138, longitude: -746_188_906} - result = Routeguide.RouteGuide.Stub.get_feature(channel, valid_point, return_headers: true) + result = + Routeguide.RouteGuide.Stub.get_feature(channel, 
valid_point, return_headers: true) assert {:ok, response, _metadata} = result assert response.location == valid_point @@ -228,7 +256,8 @@ defmodule GRPC.StreamTest do # Test with negative latitude - should trigger error negative_point = %Routeguide.Point{latitude: -50, longitude: 100} - result = Routeguide.RouteGuide.Stub.get_feature(channel, negative_point, return_headers: true) + result = + Routeguide.RouteGuide.Stub.get_feature(channel, negative_point, return_headers: true) # Should receive error response with custom message assert {:error, error} = result @@ -247,7 +276,8 @@ defmodule GRPC.StreamTest do # Test with positive latitude - should succeed valid_point = %Routeguide.Point{latitude: 50, longitude: 100} - result = Routeguide.RouteGuide.Stub.get_feature(channel, valid_point, return_headers: true) + result = + Routeguide.RouteGuide.Stub.get_feature(channel, valid_point, return_headers: true) assert {:ok, response, _metadata} = result assert response.location == valid_point @@ -263,19 +293,22 @@ defmodule GRPC.StreamTest do # Test latitude error lat_error_point = %Routeguide.Point{latitude: 0, longitude: 100} assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, lat_error_point) - assert error.status == 3 # INVALID_ARGUMENT + # INVALID_ARGUMENT + assert error.status == 3 assert error.message =~ "Latitude error" # Test longitude error long_error_point = %Routeguide.Point{latitude: 100, longitude: 0} assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, long_error_point) - assert error.status == 3 # INVALID_ARGUMENT + # INVALID_ARGUMENT + assert error.status == 3 assert error.message =~ "Longitude error" # Test ArgumentError (negative latitude) - falls into "Unknown error" branch arg_error_point = %Routeguide.Point{latitude: -100, longitude: 100} assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, arg_error_point) - assert error.status == 2 # UNKNOWN (because message contains "Latitude must be 
positive") + # UNKNOWN (because message contains "Latitude must be positive") + assert error.status == 2 assert error.message =~ "Latitude must be positive" end) end @@ -287,22 +320,31 @@ defmodule GRPC.StreamTest do # Test RuntimeError - should validate and transform to INTERNAL runtime_error_point = %Routeguide.Point{latitude: 999, longitude: 100} - assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, runtime_error_point) - assert error.status == 13 # INTERNAL + + assert {:error, error} = + Routeguide.RouteGuide.Stub.get_feature(channel, runtime_error_point) + + # INTERNAL + assert error.status == 13 assert error.message =~ "Validated: RuntimeError" assert error.message =~ "Runtime error occurred" # Test ArgumentError - should validate and transform to INVALID_ARGUMENT arg_error_point = %Routeguide.Point{latitude: 888, longitude: 100} assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, arg_error_point) - assert error.status == 3 # INVALID_ARGUMENT + # INVALID_ARGUMENT + assert error.status == 3 assert error.message =~ "Validated: ArgumentError" assert error.message =~ "Argument is invalid" # Test simple string error - should validate and transform to UNKNOWN string_error_point = %Routeguide.Point{latitude: 777, longitude: 100} - assert {:error, error} = Routeguide.RouteGuide.Stub.get_feature(channel, string_error_point) - assert error.status == 2 # UNKNOWN + + assert {:error, error} = + Routeguide.RouteGuide.Stub.get_feature(channel, string_error_point) + + # UNKNOWN + assert error.status == 2 assert error.message =~ "Validated:" assert error.message =~ "Simple string error" From 4862c369e783839fb695911a9648b8f0e82c969c Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Tue, 16 Dec 2025 12:29:27 -0300 Subject: [PATCH 44/47] review adjustments --- .../lib/grpc/server/adapters/thousand_island.ex | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git 
a/grpc_server/lib/grpc/server/adapters/thousand_island.ex b/grpc_server/lib/grpc/server/adapters/thousand_island.ex index f2a5c79c7..c2052369f 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island.ex @@ -385,12 +385,12 @@ defmodule GRPC.Server.Adapters.ThousandIsland do {:ok, pid, actual_port} {:error, {:already_started, pid}} -> - Logger.warning("Failed to start #{servers_name(endpoint, servers)}: already started") + Logger.warning("Failed to start #{server_names(endpoint, servers)}: already started") actual_port = get_actual_port(pid, port) {:ok, pid, actual_port} {:error, :eaddrinuse} = error -> - Logger.error("Failed to start #{servers_name(endpoint, servers)}: port already in use") + Logger.error("Failed to start #{server_names(endpoint, servers)}: port already in use") error {:error, _} = error -> @@ -416,10 +416,10 @@ defmodule GRPC.Server.Adapters.ThousandIsland do scheme = if cred_opts(opts), do: :https, else: :http Logger.info( - "Starting #{servers_name(endpoint, servers)} with ThousandIsland using #{scheme}://0.0.0.0:#{port}" + "Starting #{server_names(endpoint, servers)} with ThousandIsland using #{scheme}://0.0.0.0:#{port}" ) - server_name = servers_name(endpoint, servers) + server_name = server_names(endpoint, servers) %{ id: server_name, @@ -623,11 +623,11 @@ defmodule GRPC.Server.Adapters.ThousandIsland do opts[:cred] end - defp servers_name(nil, servers) do + defp server_names(nil, servers) do Enum.map_join(servers, ",", fn _k, s -> inspect(s) end) end - defp servers_name(endpoint, _) do + defp server_names(endpoint, _) do inspect(endpoint) end end From 180fdbd2501074bcc13895ef45805924d4f0bf3a Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Tue, 16 Dec 2025 19:50:01 -0300 Subject: [PATCH 45/47] format --- .../lib/grpc/server/adapters/thousand_island/handler.ex | 5 ++++- grpc_server/lib/grpc/server/supervisor.ex | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff 
--git a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex index c9ac19a18..b78f2ff8a 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island/handler.ex @@ -97,7 +97,10 @@ defmodule GRPC.Server.Adapters.ThousandIsland.Handler do end def handle_info({:register_stream_task, stream_id, task_pid, task_ref}, {socket, state}) do - Logger.debug("[Handler] Registering stream task for stream #{stream_id}, pid=#{inspect(task_pid)}") + Logger.debug( + "[Handler] Registering stream task for stream #{stream_id}, pid=#{inspect(task_pid)}" + ) + new_stream_tasks = Map.put(state.stream_tasks, stream_id, {task_pid, task_ref}) {:noreply, {socket, %{state | stream_tasks: new_stream_tasks}}} end diff --git a/grpc_server/lib/grpc/server/supervisor.ex b/grpc_server/lib/grpc/server/supervisor.ex index 59469074d..d23382bde 100644 --- a/grpc_server/lib/grpc/server/supervisor.ex +++ b/grpc_server/lib/grpc/server/supervisor.ex @@ -104,7 +104,7 @@ defmodule GRPC.Server.Supervisor do end GRPC.Server.Cache.init() - + children = [{Task.Supervisor, name: GRPC.Server.StreamTaskSupervisor}] ++ if opts[:start_server] do From 91349ef7965a5dfb901ea93542aba2544696ad7d Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Tue, 16 Dec 2025 21:32:17 -0300 Subject: [PATCH 46/47] chore: create your own supervisor for Thousand Island. 
--- .../transport/http2/flow_control_test.exs | 2 +- grpc_server/lib/grpc/server.ex | 13 -- .../grpc/server/adapters/cowboy/supervisor.ex | 53 ++++++ .../grpc/server/adapters/thousand_island.ex | 31 ++-- .../adapters/thousand_island/supervisor.ex | 160 ++++++++++++++++++ grpc_server/lib/grpc/server/supervisor.ex | 11 +- .../server/adapters/thousand_island_test.exs | 26 +-- .../test/grpc/server/supervisor_test.exs | 16 +- 8 files changed, 254 insertions(+), 58 deletions(-) create mode 100644 grpc_server/lib/grpc/server/adapters/cowboy/supervisor.ex create mode 100644 grpc_server/lib/grpc/server/adapters/thousand_island/supervisor.ex diff --git a/grpc_core/test/grpc/transport/http2/flow_control_test.exs b/grpc_core/test/grpc/transport/http2/flow_control_test.exs index 923f60d90..9fd3eda7b 100644 --- a/grpc_core/test/grpc/transport/http2/flow_control_test.exs +++ b/grpc_core/test/grpc/transport/http2/flow_control_test.exs @@ -68,7 +68,7 @@ defmodule GRPC.Transport.HTTP2.FlowControlTest do # Receive 100MB of data data_size = 100 * 1024 * 1024 - {new_window, increment} = FlowControl.compute_recv_window(initial_window, data_size) + {new_window, _increment} = FlowControl.compute_recv_window(initial_window, data_size) # Window should be decreased by data size, then potentially increased by increment # Just verify the window is reasonable (non-negative and less than max) diff --git a/grpc_server/lib/grpc/server.ex b/grpc_server/lib/grpc/server.ex index dc5e9cac7..57b2370d2 100644 --- a/grpc_server/lib/grpc/server.ex +++ b/grpc_server/lib/grpc/server.ex @@ -404,13 +404,6 @@ defmodule GRPC.Server do @spec start(module() | [module()], non_neg_integer(), Keyword.t()) :: {atom(), any(), non_neg_integer()} | {:error, any()} def start(servers, port, opts \\ []) do - # Ensure Task.Supervisor is started for streaming RPCs - # (When using Supervisor, it's started there; when using start/3 directly, we need it here) - case Task.Supervisor.start_link(name: 
GRPC.Server.StreamTaskSupervisor) do - {:ok, _pid} -> :ok - {:error, {:already_started, _pid}} -> :ok - end - adapter = Keyword.get(opts, :adapter) || GRPC.Server.Adapters.Cowboy servers = GRPC.Server.servers_to_map(servers) adapter.start(nil, servers, port, opts) @@ -420,12 +413,6 @@ defmodule GRPC.Server do @spec start_endpoint(atom(), non_neg_integer(), Keyword.t()) :: {atom(), any(), non_neg_integer()} def start_endpoint(endpoint, port, opts \\ []) do - # Ensure Task.Supervisor is started for streaming RPCs - case Task.Supervisor.start_link(name: GRPC.Server.StreamTaskSupervisor) do - {:ok, _pid} -> :ok - {:error, {:already_started, _pid}} -> :ok - end - opts = Keyword.validate!(opts, adapter: GRPC.Server.Adapters.Cowboy) adapter = opts[:adapter] servers = endpoint.__meta__(:servers) diff --git a/grpc_server/lib/grpc/server/adapters/cowboy/supervisor.ex b/grpc_server/lib/grpc/server/adapters/cowboy/supervisor.ex new file mode 100644 index 000000000..1cd453a19 --- /dev/null +++ b/grpc_server/lib/grpc/server/adapters/cowboy/supervisor.ex @@ -0,0 +1,53 @@ +defmodule GRPC.Server.Adapters.Cowboy.Supervisor do + @moduledoc """ + Supervisor for Cowboy adapter. + + This supervisor manages the lifecycle of the Cowboy/Ranch server and + provides isolation from other adapters. It ensures the Task.Supervisor + is started before the Cowboy server. + + ## Supervision Tree + + ``` + GRPC.Server.Supervisor + └── Cowboy.Supervisor (this module) + ├── Task.Supervisor (for stream tasks) + └── Ranch Listener (Cowboy server) + ``` + """ + + use Supervisor + require Logger + + @doc """ + Starts the Cowboy supervisor. 
+ + ## Options + + * `:endpoint` - The endpoint module (optional) + * `:servers` - Map of service name => server modules + * `:port` - The port to listen on + * `:adapter_opts` - Cowboy-specific options + * `:cred` - SSL credentials (optional, for HTTPS) + """ + def start_link(opts) do + Supervisor.start_link(__MODULE__, opts, name: __MODULE__) + end + + @impl true + def init(opts) do + endpoint = opts[:endpoint] + servers = opts[:servers] + port = opts[:port] + + # Get the cowboy child spec + cowboy_child_spec = GRPC.Server.Adapters.Cowboy.child_spec(endpoint, servers, port, opts) + + children = [ + {Task.Supervisor, name: GRPC.Server.StreamTaskSupervisor}, + cowboy_child_spec + ] + + Supervisor.init(children, strategy: :one_for_one) + end +end diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island.ex b/grpc_server/lib/grpc/server/adapters/thousand_island.ex index c2052369f..053a44588 100644 --- a/grpc_server/lib/grpc/server/adapters/thousand_island.ex +++ b/grpc_server/lib/grpc/server/adapters/thousand_island.ex @@ -377,6 +377,17 @@ defmodule GRPC.Server.Adapters.ThousandIsland do """ @impl true def start(endpoint, servers, port, opts) do + case Process.whereis(GRPC.Server.StreamTaskSupervisor) do + nil -> + case Task.Supervisor.start_link(name: GRPC.Server.StreamTaskSupervisor) do + {:ok, _pid} -> :ok + {:error, {:already_started, _pid}} -> :ok + end + + _pid -> + :ok + end + server_opts = build_server_opts(endpoint, servers, port, opts) case ThousandIsland.start_link(server_opts) do @@ -411,19 +422,17 @@ defmodule GRPC.Server.Adapters.ThousandIsland do @spec child_spec(atom(), %{String.t() => [module()]}, non_neg_integer(), Keyword.t()) :: Supervisor.child_spec() def child_spec(endpoint, servers, port, opts) do - server_opts = build_server_opts(endpoint, servers, port, opts) - - scheme = if cred_opts(opts), do: :https, else: :http - - Logger.info( - "Starting #{server_names(endpoint, servers)} with ThousandIsland using #{scheme}://0.0.0.0:#{port}" - 
) - - server_name = server_names(endpoint, servers) + supervisor_opts = [ + endpoint: endpoint, + servers: servers, + port: port, + adapter_opts: Keyword.get(opts, :adapter_opts, []), + cred: opts[:cred] + ] %{ - id: server_name, - start: {ThousandIsland, :start_link, [server_opts]}, + id: __MODULE__.Supervisor, + start: {__MODULE__.Supervisor, :start_link, [supervisor_opts]}, type: :supervisor, restart: :permanent, shutdown: :infinity diff --git a/grpc_server/lib/grpc/server/adapters/thousand_island/supervisor.ex b/grpc_server/lib/grpc/server/adapters/thousand_island/supervisor.ex new file mode 100644 index 000000000..f079186d2 --- /dev/null +++ b/grpc_server/lib/grpc/server/adapters/thousand_island/supervisor.ex @@ -0,0 +1,160 @@ +defmodule GRPC.Server.Adapters.ThousandIsland.Supervisor do + @moduledoc """ + Supervisor for ThousandIsland adapter. + + This supervisor manages the lifecycle of the ThousandIsland server and + provides isolation from other adapters. It encapsulates all ThousandIsland-specific + configuration and startup logic. + + ## Supervision Tree + + ``` + GRPC.Server.Supervisor + └── ThousandIsland.Supervisor (this module) + └── ThousandIsland (actual socket server) + ├── Acceptor Pool + ├── Connection Handlers + └── Handler Processes + ``` + + ## Responsibilities + + - Configures ThousandIsland server with gRPC-specific settings + - Manages HTTP/2 settings and transport options + - Handles SSL/TLS configuration + - Provides clean shutdown on termination + """ + + use Supervisor + require Logger + + alias GRPC.Server.Adapters.ThousandIsland.Handler + + @default_num_acceptors 10 + @default_max_connections 16_384 + + @doc """ + Starts the ThousandIsland supervisor. 
+ + ## Options + + * `:endpoint` - The endpoint module (optional) + * `:servers` - Map of service name => server modules + * `:port` - The port to listen on + * `:adapter_opts` - ThousandIsland-specific options (see below) + * `:cred` - SSL credentials (optional, for HTTPS) + + ## Adapter Options + + * `:num_acceptors` - Number of acceptor processes (default: 10) + * `:num_connections` - Maximum number of connections (default: 16384) + * `:ip` - IP address to bind to (default: {0, 0, 0, 0}) + * `:transport_options` - Additional transport options + """ + def start_link(opts) do + Supervisor.start_link(__MODULE__, opts, name: __MODULE__) + end + + @impl true + def init(opts) do + endpoint = opts[:endpoint] + servers = opts[:servers] + port = opts[:port] + + server_opts = build_server_opts(endpoint, servers, port, opts) + + scheme = if cred_opts(opts), do: :https, else: :http + server_name = server_names(endpoint, servers) + + Logger.info("Starting #{server_name} with ThousandIsland using #{scheme}://0.0.0.0:#{port}") + + children = [ + {Task.Supervisor, name: GRPC.Server.StreamTaskSupervisor}, + {ThousandIsland, server_opts} + ] + + Supervisor.init(children, strategy: :one_for_one) + end + + defp build_server_opts(endpoint, servers, port, opts) do + adapter_opts = Keyword.get(opts, :adapter_opts, opts) + + num_acceptors = Keyword.get(adapter_opts, :num_acceptors, @default_num_acceptors) + num_connections = Keyword.get(adapter_opts, :num_connections, @default_max_connections) + + transport_opts = + adapter_opts + |> Keyword.get(:transport_options, []) + |> Keyword.put(:port, port) + |> maybe_add_ip(adapter_opts) + |> maybe_add_ssl(cred_opts(opts)) + # Optimize TCP buffers for gRPC performance (support up to 1MB messages) + # 1MB buffer for large messages + |> Keyword.put_new(:buffer, 1_048_576) + # 1MB receive buffer + |> Keyword.put_new(:recbuf, 1_048_576) + # 1MB send buffer + |> Keyword.put_new(:sndbuf, 1_048_576) + # Disable Nagle's algorithm for low latency + |> 
Keyword.put_new(:nodelay, true) + + # Configure HTTP/2 settings for larger frames (needed for large gRPC messages) + local_settings = [ + # 1MB window size for large payloads + initial_window_size: 1_048_576, + # Keep default max frame size + max_frame_size: 16_384 + ] + + handler_options = %{ + endpoint: endpoint, + servers: servers, + opts: [local_settings: local_settings] + } + + Logger.debug("[ThousandIsland.Supervisor] Creating server configuration") + + [ + port: port, + transport_module: transport_module(opts), + transport_options: transport_opts, + handler_module: Handler, + handler_options: handler_options, + num_acceptors: num_acceptors, + num_connections: num_connections + ] + end + + defp maybe_add_ip(transport_opts, adapter_opts) do + case Keyword.get(adapter_opts, :ip) do + nil -> transport_opts + ip -> Keyword.put(transport_opts, :ip, ip) + end + end + + defp maybe_add_ssl(transport_opts, nil), do: transport_opts + + defp maybe_add_ssl(transport_opts, cred_opts) do + Keyword.merge(transport_opts, cred_opts.ssl) + end + + defp transport_module(opts) do + if cred_opts(opts) do + ThousandIsland.Transports.SSL + else + ThousandIsland.Transports.TCP + end + end + + defp cred_opts(opts) do + opts[:cred] + end + + defp server_names(nil, servers) do + Enum.map_join(servers, ",", fn {_k, s} -> inspect(s) end) + end + + defp server_names(endpoint, _) do + inspect(endpoint) + end +end diff --git a/grpc_server/lib/grpc/server/supervisor.ex b/grpc_server/lib/grpc/server/supervisor.ex index d23382bde..0f7ea1928 100644 --- a/grpc_server/lib/grpc/server/supervisor.ex +++ b/grpc_server/lib/grpc/server/supervisor.ex @@ -106,12 +106,11 @@ defmodule GRPC.Server.Supervisor do GRPC.Server.Cache.init() children = - [{Task.Supervisor, name: GRPC.Server.StreamTaskSupervisor}] ++ - if opts[:start_server] do - [child_spec(endpoint_or_servers, opts[:port], opts)] - else - [] - end + if opts[:start_server] do + [child_spec(endpoint_or_servers, opts[:port], opts)] + else + [] + 
end Supervisor.init(children, strategy: :one_for_one) end diff --git a/grpc_server/test/grpc/server/adapters/thousand_island_test.exs b/grpc_server/test/grpc/server/adapters/thousand_island_test.exs index 82829afa9..dee718580 100644 --- a/grpc_server/test/grpc/server/adapters/thousand_island_test.exs +++ b/grpc_server/test/grpc/server/adapters/thousand_island_test.exs @@ -7,24 +7,26 @@ defmodule GRPC.Server.Adapters.ThousandIslandTest do test "returns valid child spec" do spec = Adapter.child_spec(:test_endpoint, [], 50051, []) - # After moving Task.Supervisor to GRPC.Server.Supervisor, - # the spec now returns ThousandIsland's child spec directly assert is_map(spec) + assert spec.id == Adapter.Supervisor assert spec.type == :supervisor - assert {ThousandIsland, :start_link, [args]} = spec.start - assert is_list(args) - assert args[:port] == 50051 - assert args[:handler_module] == GRPC.Server.Adapters.ThousandIsland.Handler + assert {Adapter.Supervisor, :start_link, [opts_list]} = spec.start + assert is_list(opts_list) + assert Keyword.get(opts_list, :endpoint) == :test_endpoint + assert Keyword.get(opts_list, :servers) == [] + assert Keyword.get(opts_list, :port) == 50051 + assert Keyword.get(opts_list, :adapter_opts) == [] + assert Keyword.get(opts_list, :cred) == nil end test "includes adapter options in child spec" do - opts = [num_acceptors: 5, num_connections: 50] - spec = Adapter.child_spec(:test_endpoint, [], 50051, opts) + adapter_opts = [num_acceptors: 5, num_connections: 50] + spec = Adapter.child_spec(:test_endpoint, [], 50051, adapter_opts: adapter_opts) - {ThousandIsland, :start_link, [args]} = spec.start - - assert args[:num_acceptors] == 5 - assert args[:num_connections] == 50 + {Adapter.Supervisor, :start_link, [opts_list]} = spec.start + adapter_opts_kw = Keyword.get(opts_list, :adapter_opts) + assert Keyword.get(adapter_opts_kw, :num_acceptors) == 5 + assert Keyword.get(adapter_opts_kw, :num_connections) == 50 end end diff --git 
a/grpc_server/test/grpc/server/supervisor_test.exs b/grpc_server/test/grpc/server/supervisor_test.exs index bc63e7966..7ff5b22e9 100644 --- a/grpc_server/test/grpc/server/supervisor_test.exs +++ b/grpc_server/test/grpc/server/supervisor_test.exs @@ -11,15 +11,7 @@ defmodule GRPC.Server.SupervisorTest do test "does not start children if opts sets false" do assert { :ok, - {%{strategy: :one_for_one}, - [ - %{ - id: GRPC.Server.StreamTaskSupervisor, - start: - {Task.Supervisor, :start_link, [[name: GRPC.Server.StreamTaskSupervisor]]}, - type: :supervisor - } - ]} + {%{strategy: :one_for_one}, []} } = Supervisor.init(endpoint: MockEndpoint, port: 1234, start_server: false) end @@ -45,12 +37,6 @@ defmodule GRPC.Server.SupervisorTest do { %{strategy: :one_for_one, auto_shutdown: :never, intensity: 3, period: 5}, [ - %{ - id: GRPC.Server.StreamTaskSupervisor, - start: - {Task.Supervisor, :start_link, [[name: GRPC.Server.StreamTaskSupervisor]]}, - type: :supervisor - }, %{ id: {:ranch_embedded_sup, ^endpoint_str}, start: From c2f6135dfb63f7afefa115cd8f66cb7d29c31c39 Mon Sep 17 00:00:00 2001 From: Adriano Santos Date: Tue, 16 Dec 2025 21:34:13 -0300 Subject: [PATCH 47/47] remove dead code --- .../grpc/server/adapters/cowboy/supervisor.ex | 53 ------------------- 1 file changed, 53 deletions(-) delete mode 100644 grpc_server/lib/grpc/server/adapters/cowboy/supervisor.ex diff --git a/grpc_server/lib/grpc/server/adapters/cowboy/supervisor.ex b/grpc_server/lib/grpc/server/adapters/cowboy/supervisor.ex deleted file mode 100644 index 1cd453a19..000000000 --- a/grpc_server/lib/grpc/server/adapters/cowboy/supervisor.ex +++ /dev/null @@ -1,53 +0,0 @@ -defmodule GRPC.Server.Adapters.Cowboy.Supervisor do - @moduledoc """ - Supervisor for Cowboy adapter. - - This supervisor manages the lifecycle of the Cowboy/Ranch server and - provides isolation from other adapters. It ensures the Task.Supervisor - is started before the Cowboy server. 
- - ## Supervision Tree - - ``` - GRPC.Server.Supervisor - └── Cowboy.Supervisor (this module) - ├── Task.Supervisor (for stream tasks) - └── Ranch Listener (Cowboy server) - ``` - """ - - use Supervisor - require Logger - - @doc """ - Starts the Cowboy supervisor. - - ## Options - - * `:endpoint` - The endpoint module (optional) - * `:servers` - Map of service name => server modules - * `:port` - The port to listen on - * `:adapter_opts` - Cowboy-specific options - * `:cred` - SSL credentials (optional, for HTTPS) - """ - def start_link(opts) do - Supervisor.start_link(__MODULE__, opts, name: __MODULE__) - end - - @impl true - def init(opts) do - endpoint = opts[:endpoint] - servers = opts[:servers] - port = opts[:port] - - # Get the cowboy child spec - cowboy_child_spec = GRPC.Server.Adapters.Cowboy.child_spec(endpoint, servers, port, opts) - - children = [ - {Task.Supervisor, name: GRPC.Server.StreamTaskSupervisor}, - cowboy_child_spec - ] - - Supervisor.init(children, strategy: :one_for_one) - end -end