diff --git a/.gitignore b/.gitignore index 80bdb160..83a78bae 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ rebar3.crashdump doc/ priv/*.so priv/*.dll +rebar.lock diff --git a/rebar.config b/rebar.config index 40947ceb..4d85e8bc 100644 --- a/rebar.config +++ b/rebar.config @@ -14,6 +14,17 @@ ]}. {xref_checks, [undefined_function_calls]}. +%% Ignore xref warnings for optional prometheus dependency +{xref_ignores, [ + {prometheus_counter, inc, 3}, + {prometheus_counter, declare, 1}, + {prometheus_gauge, set, 3}, + {prometheus_gauge, inc, 2}, + {prometheus_gauge, dec, 2}, + {prometheus_gauge, declare, 1}, + {prometheus_histogram, observe, 3}, + {prometheus_histogram, declare, 1} +]}. {cover_enabled, true}. {eunit_opts, [verbose]}. @@ -37,7 +48,6 @@ {idna, "~>6.1.0"}, {mimerl, "~>1.4"}, {certifi, "~>2.15.0"}, - {metrics, "~>1.0.0"}, {parse_trans, "3.4.1"}, {ssl_verify_fun, "~>1.1.0"}, {unicode_util_compat, "~>0.7.1"} @@ -85,6 +95,8 @@ error_handling%, %unknown ]}, + %% Exclude prometheus backend - prometheus is an optional dependency + {exclude_mods, [hackney_metrics_prometheus]}, {base_plt_apps, [erts, stdlib, kernel, crypto, runtime_tools]}, {plt_apps, top_level_deps}, {plt_extra_apps, []}, diff --git a/rebar.lock b/rebar.lock deleted file mode 100644 index 75d2cd31..00000000 --- a/rebar.lock +++ /dev/null @@ -1,26 +0,0 @@ -{"1.2.0", -[{<<"certifi">>,{pkg,<<"certifi">>,<<"2.15.0">>},0}, - {<<"idna">>,{pkg,<<"idna">>,<<"6.1.1">>},0}, - {<<"metrics">>,{pkg,<<"metrics">>,<<"1.0.1">>},0}, - {<<"mimerl">>,{pkg,<<"mimerl">>,<<"1.4.0">>},0}, - {<<"parse_trans">>,{pkg,<<"parse_trans">>,<<"3.4.1">>},0}, - {<<"ssl_verify_fun">>,{pkg,<<"ssl_verify_fun">>,<<"1.1.7">>},0}, - {<<"unicode_util_compat">>,{pkg,<<"unicode_util_compat">>,<<"0.7.0">>},0}]}. -[ -{pkg_hash,[ - {<<"certifi">>, <<"0E6E882FCDAAA0A5A9F2B3DB55B1394DBA07E8D6D9BCAD08318FB604C6839712">>}, - {<<"idna">>, <<"8A63070E9F7D0C62EB9D9FCB360A7DE382448200FBBD1B106CC96D3D8099DF8D">>}, - {<<"metrics">>, <<"25F094DEA2CDA98213CECC3AEFF09E940299D950904393B2A29D191C346A8486">>}, - {<<"mimerl">>, <<"3882A5CA67FBBE7117BA8947F27643557ADEC38FA2307490C4C4207624CB213B">>}, - {<<"parse_trans">>, <<"6E6AA8167CB44CC8F39441D05193BE6E6F4E7C2946CB2759F015F8C56B76E5FF">>}, - {<<"ssl_verify_fun">>, <<"354C321CF377240C7B8716899E182CE4890C5938111A1296ADD3EC74CF1715DF">>}, - {<<"unicode_util_compat">>, <<"BC84380C9AB48177092F43AC89E4DFA2C6D62B40B8BD132B1059ECC7232F9A78">>}]}, -{pkg_hash_ext,[ - {<<"certifi">>, <<"B147ED22CE71D72EAFDAD94F055165C1C182F61A2FF49DF28BCC71D1D5B94A60">>}, - {<<"idna">>, <<"92376EB7894412ED19AC475E4A86F7B413C1B9FBB5BD16DCCD57934157944CEA">>}, - {<<"metrics">>, <<"69B09ADDDC4F74A40716AE54D140F93BEB0FB8978D8636EADED0C31B6F099F16">>}, - {<<"mimerl">>, <<"13AF15F9F68C65884ECCA3A3891D50A7B57D82152792F3E19D88650AA126B144">>}, - {<<"parse_trans">>, <<"620A406CE75DADA827B82E453C19CF06776BE266F5A67CFF34E1EF2CBB60E49A">>}, - {<<"ssl_verify_fun">>, <<"FE4C190E8F37401D30167C8C405EDA19469F34577987C76DDE613E838BBC67F8">>}, - {<<"unicode_util_compat">>, <<"25EEE6D67DF61960CF6A794239566599B09E17E668D3700247BC498638152521">>}]} -]. 
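With the hard dependency on the metrics library gone, Prometheus support becomes opt-in: a consuming application brings its own prometheus dependency and selects the backend through the application environment (handled by hackney_util:mod_metrics/0 later in this patch). A minimal consumer setup, as a sketch (the version pin below is illustrative, not part of this patch):

    %% rebar.config of the consuming application
    {deps, [
        hackney,
        {prometheus, "~>4.11.0"}  %% hypothetical pin; any recent prometheus.erl release
    ]}.

    %% sys.config: route hackney metrics to the prometheus backend
    [
        {hackney, [{metrics_backend, prometheus}]}
    ].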
diff --git a/src/hackney.app.src b/src/hackney.app.src index 70ce77de..d769ac38 100644 --- a/src/hackney.app.src +++ b/src/hackney.app.src @@ -16,7 +16,6 @@ mimerl, certifi, ssl_verify_fun, - metrics, unicode_util_compat]}, {included_applications, []}, {mod, { hackney_app, []}}, diff --git a/src/hackney_manager.erl b/src/hackney_manager.erl index 6c9703b2..1f6ca151 100644 --- a/src/hackney_manager.erl +++ b/src/hackney_manager.erl @@ -12,8 +12,7 @@ %% Metrics API -export([start_request/1, - finish_request/2, - get_metrics_engine/0]). + finish_request/2]). %% Backward compatibility API -export([get_state/1, async_response_pid/1]). @@ -23,9 +22,7 @@ -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). --record(state, { - metrics_engine -}). +-record(state, {}). %%==================================================================== %% API @@ -41,16 +38,11 @@ start_request(Host) -> finish_request(Host, StartTime) -> gen_server:cast(?MODULE, {finish_request, Host, StartTime}). -%% @doc Get the current metrics engine. --spec get_metrics_engine() -> module(). -get_metrics_engine() -> - hackney_metrics:get_engine(). - %% @doc Check the state of a connection (backward compatibility). %% In the old architecture, this tracked request state. %% In the new architecture, we simply check if the connection process is alive. -%% Returns `req_not_found' if the process is dead, or the connection state. --spec get_state(pid() | term()) -> req_not_found | term(). +%% Returns `req_not_found' if the process is dead, or the connection state name. +-spec get_state(pid() | term()) -> req_not_found | atom(). get_state(ConnPid) when is_pid(ConnPid) -> case is_process_alive(ConnPid) of false -> req_not_found; @@ -63,21 +55,15 @@ get_state(ConnPid) when is_pid(ConnPid) -> get_state(_) -> req_not_found. -%% @doc Check if a connection is in async mode (backward compatibility). -%% In the old architecture, this returned the async response process PID. -%% In the new architecture, we check if the connection process is in async mode. -%% Returns `{error, req_not_async}' if not in async mode. --spec async_response_pid(pid() | term()) -> {ok, pid()} | {error, req_not_async}. -async_response_pid(ConnPid) when is_pid(ConnPid) -> - case is_process_alive(ConnPid) of - false -> {error, req_not_async}; - true -> - case hackney_conn:get_state(ConnPid) of - {ok, State} when State =:= receiving; State =:= streaming -> - {ok, ConnPid}; - _ -> - {error, req_not_async} - end +%% @doc Get the async response pid (backward compatibility). +%% In the new architecture, all streaming connections are considered "async". +-spec async_response_pid(pid()) -> {ok, pid()} | {error, req_not_found | req_not_async}. +async_response_pid(Ref) when is_pid(Ref) -> + case get_state(Ref) of + req_not_found -> {error, req_not_found}; + streaming -> {ok, Ref}; + streaming_once -> {ok, Ref}; + _ -> {error, req_not_async} end; async_response_pid(_) -> {error, req_not_async}. @@ -90,28 +76,27 @@ start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). init([]) -> - %% Initialize metrics - Engine = hackney_metrics:get_engine(), - _ = metrics:new(Engine, counter, [hackney, nb_requests]), - _ = metrics:new(Engine, counter, [hackney, total_requests]), - _ = metrics:new(Engine, counter, [hackney, finished_requests]), - {ok, #state{metrics_engine = Engine}}. + {ok, #state{}}. handle_call(_Request, _From, State) -> {reply, ok, State}. 
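A usage sketch for the reworked compatibility functions above (the wrapper function below is hypothetical, not part of hackney):

    %% Return where an async/streaming response can be read from, if anywhere.
    check_async(ConnPid) ->
        case hackney_manager:async_response_pid(ConnPid) of
            {ok, Pid}              -> {streaming_from, Pid};
            {error, req_not_found} -> connection_dead;
            {error, req_not_async} -> not_streaming
        end.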
-handle_cast({start_request, Host}, #state{metrics_engine = Engine} = State) -> - _ = metrics:increment_counter(Engine, [hackney, Host, nb_requests]), - _ = metrics:increment_counter(Engine, [hackney, nb_requests]), - _ = metrics:increment_counter(Engine, [hackney, total_requests]), +handle_cast({start_request, Host}, State) -> + HostBin = to_binary(Host), + Labels = #{host => HostBin}, + _ = hackney_metrics:counter_inc(hackney_requests_total, Labels), + _ = hackney_metrics:gauge_inc(hackney_requests_active, Labels), {noreply, State}; -handle_cast({finish_request, Host, StartTime}, #state{metrics_engine = Engine} = State) -> - RequestTime = timer:now_diff(os:timestamp(), StartTime) / 1000, - _ = metrics:update_histogram(Engine, [hackney, Host, request_time], RequestTime), - _ = metrics:decrement_counter(Engine, [hackney, Host, nb_requests]), - _ = metrics:decrement_counter(Engine, [hackney, nb_requests]), - _ = metrics:increment_counter(Engine, [hackney, finished_requests]), +handle_cast({finish_request, Host, StartTime}, State) -> + HostBin = to_binary(Host), + Labels = #{host => HostBin}, + %% Calculate duration in seconds (Prometheus convention) + DurationMicros = timer:now_diff(os:timestamp(), StartTime), + DurationSeconds = DurationMicros / 1000000, + _ = hackney_metrics:histogram_observe(hackney_request_duration_seconds, Labels, DurationSeconds), + _ = hackney_metrics:gauge_dec(hackney_requests_active, Labels), + _ = hackney_metrics:counter_inc(hackney_requests_finished_total, Labels), {noreply, State}; handle_cast(_Msg, State) -> @@ -125,3 +110,11 @@ terminate(_Reason, _State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. + +%%==================================================================== +%% Internal functions +%%==================================================================== + +to_binary(Host) when is_binary(Host) -> Host; +to_binary(Host) when is_list(Host) -> list_to_binary(Host); +to_binary(Host) when is_atom(Host) -> atom_to_binary(Host, utf8). diff --git a/src/hackney_metrics.erl b/src/hackney_metrics.erl index 02fe1f75..f6f46527 100644 --- a/src/hackney_metrics.erl +++ b/src/hackney_metrics.erl @@ -3,7 +3,7 @@ %%% This file is part of hackney released under the Apache 2 license. %%% See the NOTICE for more information. %%% -%%% Copyright (c) 2012-2018 Benoît Chesneau +%%% Copyright (c) 2012-2026 Benoît Chesneau %%% -module(hackney_metrics). @@ -12,16 +12,122 @@ %% API -export([ init/0, - get_engine/0 + get_backend/0 ]). +%% Counter operations +-export([ + counter_inc/2, + counter_inc/3 +]). + +%% Gauge operations +-export([ + gauge_set/3, + gauge_inc/2, + gauge_dec/2 +]). + +%% Histogram operations +-export([ + histogram_observe/3 +]). + +%% Metric declarations +-export([ + declare_counter/3, + declare_gauge/3, + declare_histogram/3, + declare_histogram/4, + declare_pool_metrics/1 +]). -include("hackney.hrl"). +%% Default duration histogram buckets (in seconds) +-define(DEFAULT_DURATION_BUCKETS, [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0]). +%% @doc Initialize the metrics system. +%% Determines the backend to use and declares all hackney metrics. init() -> - Metrics = metrics:init(hackney_util:mod_metrics()), - ets:insert(?CONFIG, {mod_metrics, Metrics}). + Backend = hackney_util:mod_metrics(), + ets:insert(?CONFIG, {metrics_backend, Backend}), + declare_metrics(Backend). + +%% @doc Get the current metrics backend module. 
+get_backend() -> + try + ets:lookup_element(?CONFIG, metrics_backend, 2) + catch + error:badarg -> + %% ETS table not ready yet, return dummy backend + hackney_metrics_dummy + end. + +%% @doc Increment a counter by 1. +counter_inc(Name, Labels) -> + (get_backend()):counter_inc(Name, Labels). + +%% @doc Increment a counter by a value. +counter_inc(Name, Labels, Value) -> + (get_backend()):counter_inc(Name, Labels, Value). + +%% @doc Set a gauge to a value. +gauge_set(Name, Labels, Value) -> + (get_backend()):gauge_set(Name, Labels, Value). + +%% @doc Increment a gauge by 1. +gauge_inc(Name, Labels) -> + (get_backend()):gauge_inc(Name, Labels). + +%% @doc Decrement a gauge by 1. +gauge_dec(Name, Labels) -> + (get_backend()):gauge_dec(Name, Labels). + +%% @doc Observe a value for a histogram. +histogram_observe(Name, Labels, Value) -> + (get_backend()):histogram_observe(Name, Labels, Value). + +%% @doc Declare a counter metric. +declare_counter(Name, Help, LabelKeys) -> + (get_backend()):declare_counter(Name, Help, LabelKeys). + +%% @doc Declare a gauge metric. +declare_gauge(Name, Help, LabelKeys) -> + (get_backend()):declare_gauge(Name, Help, LabelKeys). + +%% @doc Declare a histogram metric with default buckets. +declare_histogram(Name, Help, LabelKeys) -> + (get_backend()):declare_histogram(Name, Help, LabelKeys). + +%% @doc Declare a histogram metric with custom buckets. +declare_histogram(Name, Help, LabelKeys, Buckets) -> + (get_backend()):declare_histogram(Name, Help, LabelKeys, Buckets). + +%% @doc Declare pool-specific metrics. +%% Called when a new pool is created. +declare_pool_metrics(_PoolName) -> + Backend = get_backend(), + %% Only declare once (idempotent for prometheus) + Backend:declare_gauge(hackney_pool_free_count, + <<"Number of free/available connections in the pool">>, [pool]), + Backend:declare_gauge(hackney_pool_in_use_count, + <<"Number of connections currently in use">>, [pool]), + Backend:declare_counter(hackney_pool_checkouts_total, + <<"Total number of connection checkouts">>, [pool]), + ok. -get_engine() -> - ets:lookup_element(?CONFIG, mod_metrics, 2). \ No newline at end of file +%% @private +%% Declare all hackney metrics at startup. +declare_metrics(Backend) -> + %% Request metrics + Backend:declare_counter(hackney_requests_total, + <<"Total number of HTTP requests started">>, [host]), + Backend:declare_gauge(hackney_requests_active, + <<"Number of currently active HTTP requests">>, [host]), + Backend:declare_counter(hackney_requests_finished_total, + <<"Total number of HTTP requests finished">>, [host]), + Backend:declare_histogram(hackney_request_duration_seconds, + <<"HTTP request duration in seconds">>, [host], ?DEFAULT_DURATION_BUCKETS), + %% Pool metrics are declared when pools are created + ok. diff --git a/src/hackney_metrics_backend.erl b/src/hackney_metrics_backend.erl new file mode 100644 index 00000000..963bb543 --- /dev/null +++ b/src/hackney_metrics_backend.erl @@ -0,0 +1,33 @@ +%%% -*- erlang -*- +%%% +%%% This file is part of hackney released under the Apache 2 license. +%%% See the NOTICE for more information. +%%% +%%% Copyright (c) 2012-2026 Benoît Chesneau +%%% + +-module(hackney_metrics_backend). +-author("benoitc"). + +%% Behaviour callbacks for hackney metrics backends +%% +%% Implementations must export all callback functions. +%% See hackney_metrics_dummy for a reference implementation. + +%% Counter operations (monotonically increasing) +-callback counter_inc(Name :: atom(), Labels :: map()) -> ok. 
+-callback counter_inc(Name :: atom(), Labels :: map(), Value :: number()) -> ok. + +%% Gauge operations (can go up or down) +-callback gauge_set(Name :: atom(), Labels :: map(), Value :: number()) -> ok. +-callback gauge_inc(Name :: atom(), Labels :: map()) -> ok. +-callback gauge_dec(Name :: atom(), Labels :: map()) -> ok. + +%% Histogram operations (for timing/distribution measurements) +-callback histogram_observe(Name :: atom(), Labels :: map(), Value :: number()) -> ok. + +%% Metric lifecycle +-callback declare_counter(Name :: atom(), Help :: binary(), LabelKeys :: [atom()]) -> ok. +-callback declare_gauge(Name :: atom(), Help :: binary(), LabelKeys :: [atom()]) -> ok. +-callback declare_histogram(Name :: atom(), Help :: binary(), LabelKeys :: [atom()]) -> ok. +-callback declare_histogram(Name :: atom(), Help :: binary(), LabelKeys :: [atom()], Buckets :: [number()]) -> ok. diff --git a/src/hackney_metrics_dummy.erl b/src/hackney_metrics_dummy.erl new file mode 100644 index 00000000..da824df1 --- /dev/null +++ b/src/hackney_metrics_dummy.erl @@ -0,0 +1,44 @@ +%%% -*- erlang -*- +%%% +%%% This file is part of hackney released under the Apache 2 license. +%%% See the NOTICE for more information. +%%% +%%% Copyright (c) 2012-2026 Benoît Chesneau +%%% + +-module(hackney_metrics_dummy). +-author("benoitc"). + +-behaviour(hackney_metrics_backend). + +%% hackney_metrics_backend callbacks +-export([ + counter_inc/2, + counter_inc/3, + gauge_set/3, + gauge_inc/2, + gauge_dec/2, + histogram_observe/3, + declare_counter/3, + declare_gauge/3, + declare_histogram/3, + declare_histogram/4 +]). + +%% Counter operations - no-op +counter_inc(_Name, _Labels) -> ok. +counter_inc(_Name, _Labels, _Value) -> ok. + +%% Gauge operations - no-op +gauge_set(_Name, _Labels, _Value) -> ok. +gauge_inc(_Name, _Labels) -> ok. +gauge_dec(_Name, _Labels) -> ok. + +%% Histogram operations - no-op +histogram_observe(_Name, _Labels, _Value) -> ok. + +%% Metric lifecycle - no-op +declare_counter(_Name, _Help, _LabelKeys) -> ok. +declare_gauge(_Name, _Help, _LabelKeys) -> ok. +declare_histogram(_Name, _Help, _LabelKeys) -> ok. +declare_histogram(_Name, _Help, _LabelKeys, _Buckets) -> ok. diff --git a/src/hackney_metrics_prometheus.erl b/src/hackney_metrics_prometheus.erl new file mode 100644 index 00000000..965b78e9 --- /dev/null +++ b/src/hackney_metrics_prometheus.erl @@ -0,0 +1,115 @@ +%%% -*- erlang -*- +%%% +%%% This file is part of hackney released under the Apache 2 license. +%%% See the NOTICE for more information. +%%% +%%% Copyright (c) 2012-2026 Benoît Chesneau +%%% + +-module(hackney_metrics_prometheus). +-author("benoitc"). + +-behaviour(hackney_metrics_backend). + +%% hackney_metrics_backend callbacks +-export([ + counter_inc/2, + counter_inc/3, + gauge_set/3, + gauge_inc/2, + gauge_dec/2, + histogram_observe/3, + declare_counter/3, + declare_gauge/3, + declare_histogram/3, + declare_histogram/4 +]). + +%% Default histogram buckets for duration metrics (in seconds) +-define(DEFAULT_BUCKETS, [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0]). + +%% Counter operations +counter_inc(Name, Labels) -> + counter_inc(Name, Labels, 1). + +counter_inc(Name, Labels, Value) -> + try + prometheus_counter:inc(Name, labels_to_list(Labels), Value) + catch + _:_ -> ok + end. + +%% Gauge operations +gauge_set(Name, Labels, Value) -> + try + prometheus_gauge:set(Name, labels_to_list(Labels), Value) + catch + _:_ -> ok + end. 
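Aside: the hackney_metrics_backend behaviour above is the whole contract a third-party backend must satisfy, and hackney_util:mod_metrics/0 (later in this patch) passes any configured module straight through. A minimal logging backend, as a sketch (module name hypothetical):

    -module(my_log_metrics).
    -behaviour(hackney_metrics_backend).
    -export([counter_inc/2, counter_inc/3, gauge_set/3, gauge_inc/2,
             gauge_dec/2, histogram_observe/3, declare_counter/3,
             declare_gauge/3, declare_histogram/3, declare_histogram/4]).

    counter_inc(Name, Labels) -> counter_inc(Name, Labels, 1).
    counter_inc(Name, Labels, Value) ->
        logger:debug("counter ~p +~p ~p", [Name, Value, Labels]).

    gauge_set(Name, Labels, Value) ->
        logger:debug("gauge ~p=~p ~p", [Name, Value, Labels]).
    gauge_inc(Name, Labels) -> logger:debug("gauge ~p +1 ~p", [Name, Labels]).
    gauge_dec(Name, Labels) -> logger:debug("gauge ~p -1 ~p", [Name, Labels]).

    histogram_observe(Name, Labels, Value) ->
        logger:debug("histogram ~p observed ~p ~p", [Name, Value, Labels]).

    %% Declarations carry no state for a logger; accept and ignore them.
    declare_counter(_Name, _Help, _LabelKeys) -> ok.
    declare_gauge(_Name, _Help, _LabelKeys) -> ok.
    declare_histogram(_Name, _Help, _LabelKeys) -> ok.
    declare_histogram(_Name, _Help, _LabelKeys, _Buckets) -> ok.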
+ +gauge_inc(Name, Labels) -> + try + prometheus_gauge:inc(Name, labels_to_list(Labels)) + catch + _:_ -> ok + end. + +gauge_dec(Name, Labels) -> + try + prometheus_gauge:dec(Name, labels_to_list(Labels)) + catch + _:_ -> ok + end. + +%% Histogram operations +histogram_observe(Name, Labels, Value) -> + try + prometheus_histogram:observe(Name, labels_to_list(Labels), Value) + catch + _:_ -> ok + end. + +%% Metric lifecycle - declarations are idempotent in prometheus +declare_counter(Name, Help, LabelKeys) -> + try + prometheus_counter:declare([ + {name, Name}, + {help, Help}, + {labels, LabelKeys} + ]) + catch + _:_ -> ok + end. + +declare_gauge(Name, Help, LabelKeys) -> + try + prometheus_gauge:declare([ + {name, Name}, + {help, Help}, + {labels, LabelKeys} + ]) + catch + _:_ -> ok + end. + +declare_histogram(Name, Help, LabelKeys) -> + declare_histogram(Name, Help, LabelKeys, ?DEFAULT_BUCKETS). + +declare_histogram(Name, Help, LabelKeys, Buckets) -> + try + prometheus_histogram:declare([ + {name, Name}, + {help, Help}, + {labels, LabelKeys}, + {buckets, Buckets} + ]) + catch + _:_ -> ok + end. + +%% Internal helpers + +%% Convert a map of labels to a list of values in the order expected by prometheus +%% The order is determined by the keys when sorted alphabetically +labels_to_list(Labels) when is_map(Labels) -> + lists:map(fun({_K, V}) -> V end, lists:keysort(1, maps:to_list(Labels))). diff --git a/src/hackney_pool.erl b/src/hackney_pool.erl index 870977df..515314d4 100644 --- a/src/hackney_pool.erl +++ b/src/hackney_pool.erl @@ -63,7 +63,6 @@ -record(state, { name, - metrics, max_connections, keepalive_timeout, prewarm_count, @@ -399,10 +398,10 @@ init([Name, Options]) -> %% register the module ets:insert(?MODULE, {Name, self()}), - %% initialize metrics - Engine = init_metrics(Name), + %% initialize pool metrics + hackney_metrics:declare_pool_metrics(Name), - {ok, #state{name=Name, metrics=Engine, max_connections=MaxConn, + {ok, #state{name=Name, max_connections=MaxConn, keepalive_timeout=KeepaliveTimeout, prewarm_count=PrewarmCount}}. 
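For orientation before the bookkeeping hunks that follow: the three pool metrics move in step with connection state.

- checkout of a reused connection: hackney_pool_checkouts_total +1, hackney_pool_in_use_count +1, hackney_pool_free_count -1
- checkout of a fresh connection: hackney_pool_checkouts_total +1, hackney_pool_in_use_count +1
- checkin, connection kept: hackney_pool_in_use_count -1, hackney_pool_free_count +1
- checkin, connection closed or dead: hackney_pool_in_use_count -1 only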
handle_call(stats, _From, State) -> @@ -439,7 +438,7 @@ handle_call({host_stats, Host, Port}, _From, #state{available=Available, in_use= {reply, {InUseCount, FreeCount}, State}; handle_call({checkout, Key, Requester, Opts}, _From, State) -> - #state{name=PoolName, metrics=Engine, max_connections=MaxConn, + #state{name=PoolName, max_connections=MaxConn, available=Available, in_use=InUse} = State, TotalInUse = maps:size(InUse), @@ -452,7 +451,10 @@ handle_call({checkout, Key, Requester, Opts}, _From, State) -> %% Found an available connection - update owner to new requester ?report_debug("pool: reusing connection", [{pool, PoolName}, {pid, Pid}]), ok = hackney_conn:set_owner(Pid, Requester), - _ = metrics:update_meter(Engine, [hackney_pool, PoolName, take_rate], 1), + Labels = #{pool => PoolName}, + _ = hackney_metrics:counter_inc(hackney_pool_checkouts_total, Labels), + _ = hackney_metrics:gauge_inc(hackney_pool_in_use_count, Labels), + _ = hackney_metrics:gauge_dec(hackney_pool_free_count, Labels), InUse2 = maps:put(Pid, Key, InUse), {reply, {ok, Pid}, State#state{available=Available2, in_use=InUse2}}; none when TotalInUse >= MaxConn -> @@ -465,6 +467,9 @@ handle_call({checkout, Key, Requester, Opts}, _From, State) -> ?report_trace("pool: starting new connection", [{pool, PoolName}]), case start_connection(Key, Requester, Opts, State) of {ok, Pid, State2} -> + Labels = #{pool => PoolName}, + _ = hackney_metrics:counter_inc(hackney_pool_checkouts_total, Labels), + _ = hackney_metrics:gauge_inc(hackney_pool_in_use_count, Labels), InUse2 = maps:put(Pid, Key, State2#state.in_use), {reply, {ok, Pid}, State2#state{in_use=InUse2}}; {error, Reason} -> @@ -660,7 +665,7 @@ handle_info(_Info, State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. -terminate(_Reason, #state{name=PoolName, metrics=Engine, available=Available, +terminate(_Reason, #state{name=PoolName, available=Available, h2_connections=H2Conns, h3_connections=H3Conns, pid_monitors=PidMonitors}) -> %% Stop all available connections @@ -692,8 +697,10 @@ terminate(_Reason, #state{name=PoolName, metrics=Engine, available=Available, %% Demonitor all maps:foreach(fun(_Pid, MonRef) -> erlang:demonitor(MonRef, [flush]) end, PidMonitors), - %% delete pool metrics - ok = delete_metrics(Engine, PoolName), + %% Reset pool metrics to zero + Labels = #{pool => PoolName}, + _ = hackney_metrics:gauge_set(hackney_pool_in_use_count, Labels, 0), + _ = hackney_metrics:gauge_set(hackney_pool_free_count, Labels, 0), ok. %%==================================================================== @@ -776,7 +783,7 @@ start_connection(Key, Owner, Opts, State) -> %% Only TCP connections are stored. SSL upgraded connections are closed. %% Always releases the load_regulation slot since connection is no longer in use. 
do_checkin(Pid, State) -> - #state{in_use=InUse, available=Available, pid_monitors=PidMonitors} = State, + #state{name=PoolName, in_use=InUse, available=Available, pid_monitors=PidMonitors} = State, %% Get the key from in_use and remove case maps:take(Pid, InUse) of @@ -785,6 +792,10 @@ do_checkin(Pid, State) -> {Host, Port, _Transport} = Key, hackney_load_regulation:release(Host, Port), + %% Update metrics - connection no longer in use + Labels = #{pool => PoolName}, + _ = hackney_metrics:gauge_dec(hackney_pool_in_use_count, Labels), + %% Check if connection is still alive case is_process_alive(Pid) of true -> @@ -815,6 +826,9 @@ do_checkin(Pid, State) -> hackney_conn:set_owner_async(Pid, self()), Available2 = maps:update_with(Key, fun(Pids) -> [Pid | Pids] end, [Pid], Available), + %% Update metrics - connection now free + _ = hackney_metrics:gauge_inc(hackney_pool_free_count, Labels), + %% Ensure we're monitoring this pid PidMonitors2 = case maps:is_key(Pid, PidMonitors) of true -> PidMonitors; @@ -838,7 +852,7 @@ do_checkin(Pid, State) -> %% Used for sync checkin to prevent deadlock when connection calls pool. %% ShouldClose is true if connection was SSL upgraded or is a proxy tunnel (no_reuse). do_checkin_with_close_flag(Pid, ShouldClose, State) -> - #state{in_use=InUse, available=Available, pid_monitors=PidMonitors} = State, + #state{name=PoolName, in_use=InUse, available=Available, pid_monitors=PidMonitors} = State, %% Get the key from in_use and remove case maps:take(Pid, InUse) of @@ -847,6 +861,10 @@ do_checkin_with_close_flag(Pid, ShouldClose, State) -> {Host, Port, _Transport} = Key, hackney_load_regulation:release(Host, Port), + %% Update metrics - connection no longer in use + Labels = #{pool => PoolName}, + _ = hackney_metrics:gauge_dec(hackney_pool_in_use_count, Labels), + %% Check if connection is still alive case is_process_alive(Pid) of true -> @@ -873,6 +891,9 @@ do_checkin_with_close_flag(Pid, ShouldClose, State) -> hackney_conn:set_owner_async(Pid, self()), Available2 = maps:update_with(Key, fun(Pids) -> [Pid | Pids] end, [Pid], Available), + %% Update metrics - connection now free + _ = hackney_metrics:gauge_inc(hackney_pool_free_count, Labels), + %% Ensure we're monitoring this pid PidMonitors2 = case maps:is_key(Pid, PidMonitors) of true -> PidMonitors; @@ -1024,23 +1045,6 @@ prewarm_connections(PoolPid, Host, Port, Count, IdleTimeout) -> end, prewarm_connections(PoolPid, Host, Port, Count - 1, IdleTimeout). -init_metrics(PoolName) -> - Engine = hackney_metrics:get_engine(), - _ = metrics:new(Engine, histogram, [hackney_pool, PoolName, take_rate]), - _ = metrics:new(Engine, counter, [hackney_pool, PoolName, no_socket]), - _ = metrics:new(Engine, histogram, [hackney_pool, PoolName, in_use_count]), - _ = metrics:new(Engine, histogram, [hackney_pool, PoolName, free_count]), - _ = metrics:new(Engine, histogram, [hackney_pool, PoolName, queue_count]), - Engine. - -delete_metrics(Engine, PoolName) -> - _ = metrics:delete(Engine, [hackney_pool, PoolName, take_rate]), - _ = metrics:delete(Engine, [hackney_pool, PoolName, no_socket]), - _ = metrics:delete(Engine, [hackney_pool, PoolName, in_use_count]), - _ = metrics:delete(Engine, [hackney_pool, PoolName, free_count]), - _ = metrics:delete(Engine, [hackney_pool, PoolName, queue_count]), - ok. 
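Note the lifecycle change here: the removed init_metrics/delete_metrics pair created and destroyed per-pool metric objects, while the new declarations are idempotent (each declare_* in the prometheus backend tolerates an already-registered metric), so a restarted pool can simply re-declare. A sketch, with a hypothetical pool name:

    %% Safe to call repeatedly; a second call is a no-op for existing metrics.
    ok = hackney_metrics:declare_pool_metrics(my_pool),
    ok = hackney_metrics:declare_pool_metrics(my_pool).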
- handle_stats(State) -> #state{name=PoolName, max_connections=Max, available=Available, in_use=InUse} = State, diff --git a/src/hackney_util.erl b/src/hackney_util.erl index 46581e88..f44bcfd5 100644 --- a/src/hackney_util.erl +++ b/src/hackney_util.erl @@ -103,12 +103,11 @@ privdir() -> end. mod_metrics() -> - case application:get_env(hackney, mod_metrics) of - {ok, folsom} -> metrics_folsom; - {ok, exometer} -> metrics_exometer; - {ok, dummy} -> metrics_dummy; + case application:get_env(hackney, metrics_backend) of + {ok, prometheus} -> hackney_metrics_prometheus; + {ok, dummy} -> hackney_metrics_dummy; {ok, Mod} -> Mod; - _ -> metrics_dummy + undefined -> hackney_metrics_dummy end. %% @doc Get the default protocols for HTTP connections. diff --git a/test/hackney_metrics_tests.erl b/test/hackney_metrics_tests.erl index 14bdcb31..e1df93e0 100644 --- a/test/hackney_metrics_tests.erl +++ b/test/hackney_metrics_tests.erl @@ -16,28 +16,42 @@ metrics_test_() -> fun setup/0, fun teardown/1, [ - {"init inserts mod_metrics into ETS", + {"init inserts metrics_backend into ETS", fun() -> hackney_metrics:init(), - %% Verify that mod_metrics key exists in the ETS table - Result = ets:lookup(?CONFIG, mod_metrics), - ?assertMatch([{mod_metrics, _}], Result) + %% Verify that metrics_backend key exists in the ETS table + Result = ets:lookup(?CONFIG, metrics_backend), + ?assertMatch([{metrics_backend, _}], Result) end}, - {"get_engine returns the metrics engine", + {"get_backend returns the metrics backend module", fun() -> hackney_metrics:init(), - Engine = hackney_metrics:get_engine(), - %% Should return a metrics module reference - ?assert(is_tuple(Engine) orelse is_atom(Engine)) + Backend = hackney_metrics:get_backend(), + %% Should return a metrics module (atom) + ?assert(is_atom(Backend)) end}, - {"init with dummy metrics", + {"init uses dummy backend by default", fun() -> %% Ensure we're using dummy metrics (default) - application:set_env(hackney, mod_metrics, dummy), + application:unset_env(hackney, metrics_backend), hackney_metrics:init(), - Engine = hackney_metrics:get_engine(), - ?assert(is_tuple(Engine) orelse is_atom(Engine)), - application:unset_env(hackney, mod_metrics) + Backend = hackney_metrics:get_backend(), + ?assertEqual(hackney_metrics_dummy, Backend) + end}, + {"dummy backend counter_inc works", + fun() -> + %% counter_inc should not crash + ?assertEqual(ok, hackney_metrics:counter_inc(test_counter, #{host => <<"test">>})) + end}, + {"dummy backend gauge_set works", + fun() -> + %% gauge_set should not crash + ?assertEqual(ok, hackney_metrics:gauge_set(test_gauge, #{pool => default}, 42)) + end}, + {"dummy backend histogram_observe works", + fun() -> + %% histogram_observe should not crash + ?assertEqual(ok, hackney_metrics:histogram_observe(test_histogram, #{host => <<"test">>}, 0.5)) end} ]}. @@ -53,5 +67,5 @@ setup() -> teardown(_) -> %% Clean up the ETS table entry - catch ets:delete(?CONFIG, mod_metrics), + catch ets:delete(?CONFIG, metrics_backend), ok.
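Finally, a sketch of end-to-end backend selection as mod_metrics/0 and the tests above exercise it (my_log_metrics is the hypothetical backend from the earlier aside; any module implementing hackney_metrics_backend can be configured this way):

    application:set_env(hackney, metrics_backend, my_log_metrics),
    hackney_metrics:init(),   %% stores the backend in ETS and declares all metrics
    my_log_metrics = hackney_metrics:get_backend().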