From 7e769145993d93b25eae7df2607c6c150ac6a347 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Mon, 3 Jul 2023 23:46:17 +0200 Subject: [PATCH 01/73] feat(ds): hardwire emqx_durable_storage message persistence Only message persistence is currently implemented, irrespectively of whether there are persistent sessions around or not. --- apps/emqx/rebar.config | 1 + apps/emqx/src/emqx.app.src | 3 +- apps/emqx/src/emqx_app.erl | 1 + apps/emqx/src/emqx_broker.erl | 1 + apps/emqx/src/emqx_persistent_session_ds.erl | 87 +++++++++++++ apps/emqx/src/emqx_schema.erl | 8 ++ .../test/emqx_persistent_messages_SUITE.erl | 123 ++++++++++++++++++ 7 files changed, 223 insertions(+), 1 deletion(-) create mode 100644 apps/emqx/src/emqx_persistent_session_ds.erl create mode 100644 apps/emqx/test/emqx_persistent_messages_SUITE.erl diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 0278a1b1d..1082b8e6b 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -23,6 +23,7 @@ %% `git_subdir` dependency in other projects. {deps, [ {emqx_utils, {path, "../emqx_utils"}}, + {emqx_durable_storage, {path, "../emqx_durable_storage"}}, {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}}, {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}, diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index 928539f46..47f1ae4b4 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -16,7 +16,8 @@ sasl, os_mon, lc, - hocon + hocon, + emqx_durable_storage ]}, {mod, {emqx_app, []}}, {env, []}, diff --git a/apps/emqx/src/emqx_app.erl b/apps/emqx/src/emqx_app.erl index 038c93283..7881fd8fe 100644 --- a/apps/emqx/src/emqx_app.erl +++ b/apps/emqx/src/emqx_app.erl @@ -37,6 +37,7 @@ start(_Type, _Args) -> ok = maybe_load_config(), ok = emqx_persistent_session:init_db_backend(), + _ = emqx_persistent_session_ds:init(), ok = maybe_start_quicer(), ok = emqx_bpapi:start(), ok = emqx_alarm_handler:load(), diff --git a/apps/emqx/src/emqx_broker.erl b/apps/emqx/src/emqx_broker.erl index fef93768b..859f6fc91 100644 --- a/apps/emqx/src/emqx_broker.erl +++ b/apps/emqx/src/emqx_broker.erl @@ -225,6 +225,7 @@ publish(Msg) when is_record(Msg, message) -> []; Msg1 = #message{topic = Topic} -> emqx_persistent_session:persist_message(Msg1), + _ = emqx_persistent_session_ds:persist_message(Msg1), route(aggre(emqx_router:match_routes(Topic)), delivery(Msg1)) end. diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl new file mode 100644 index 000000000..3bfa82298 --- /dev/null +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -0,0 +1,87 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2021-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_persistent_session_ds). 
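%% Both init/0 and persist_message/1 below are gated on the (hidden) boolean
%% `persistent_session_store.ds` added to `emqx_schema` in this same patch;
%% when it is false they return `{skipped, disabled}` via the `?WHEN_ENABLED`
%% macro defined further down.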
+ +-export([init/0]). + +-export([persist_message/1]). + +-export([ + serialize_message/1, + deserialize_message/1 +]). + +%% FIXME +-define(DS_SHARD, <<"local">>). + +-define(WHEN_ENABLED(DO), + case is_store_enabled() of + true -> DO; + false -> {skipped, disabled} + end +). + +%% + +init() -> + ?WHEN_ENABLED( + begin + _ = emqx_ds_storage_layer_sup:start_shard(?DS_SHARD), + ok + end + ). + +%% + +-spec persist_message(emqx_types:message()) -> + ok | {skipped, _Reason} | {error, _TODO}. +persist_message(Msg) -> + ?WHEN_ENABLED( + case needs_persistence(Msg) andalso find_subscribers(Msg) of + [_ | _] -> + store_message(Msg); + % [] -> + % {skipped, no_subscribers}; + false -> + {skipped, needs_no_persistence} + end + ). + +needs_persistence(Msg) -> + not (emqx_message:get_flag(dup, Msg) orelse emqx_message:is_sys(Msg)). + +store_message(Msg) -> + ID = emqx_message:id(Msg), + Timestamp = emqx_guid:timestamp(ID), + Topic = emqx_topic:words(emqx_message:topic(Msg)), + emqx_ds_storage_layer:store(?DS_SHARD, ID, Timestamp, Topic, serialize_message(Msg)). + +find_subscribers(_Msg) -> + [node()]. + +%% + +serialize_message(Msg) -> + term_to_binary(emqx_message:to_map(Msg)). + +deserialize_message(Bin) -> + emqx_message:from_map(binary_to_term(Bin)). + +%% + +is_store_enabled() -> + emqx_config:get([persistent_session_store, ds]). diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index f552fae7f..477febe25 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -319,6 +319,14 @@ fields("persistent_session_store") -> desc => ?DESC(persistent_session_store_enabled) } )}, + {"ds", + sc( + boolean(), + #{ + default => false, + importance => ?IMPORTANCE_HIDDEN + } + )}, {"on_disc", sc( boolean(), diff --git a/apps/emqx/test/emqx_persistent_messages_SUITE.erl b/apps/emqx/test/emqx_persistent_messages_SUITE.erl new file mode 100644 index 000000000..2a8db4a9b --- /dev/null +++ b/apps/emqx/test/emqx_persistent_messages_SUITE.erl @@ -0,0 +1,123 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_persistent_messages_SUITE). + +-include_lib("stdlib/include/assert.hrl"). + +-compile(export_all). +-compile(nowarn_export_all). + +-define(NOW, + (calendar:system_time_to_rfc3339(erlang:system_time(millisecond), [{unit, millisecond}])) +). + +-define(HERE(FMT, ARGS), + io:format( + user, + "*** " ?MODULE_STRING ":~p/~p ~s @ ~p *** " ++ FMT ++ "~n", + [?FUNCTION_NAME, ?FUNCTION_ARITY, ?NOW, node() | ARGS] + ) +). + +all() -> + [t_messages_persisted]. 
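%% init_per_suite/1 below boots EMQX with `persistent_session_store.ds = true`,
%% which makes `emqx_persistent_session_ds:init/0` start the `<<"local">>`
%% storage shard; `consume/2` at the bottom of the suite then reads messages
%% back directly through `emqx_ds_storage_layer` iterators.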
+ +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(emqx_durable_storage), + ok = emqx_common_test_helpers:start_apps([], fun + (emqx) -> + emqx_common_test_helpers:boot_modules(all), + emqx_config:init_load(emqx_schema, <<"persistent_session_store.ds = true">>), + emqx_app:set_config_loader(?MODULE); + (_) -> + ok + end), + Config. + +end_per_suite(_Config) -> + emqx_common_test_helpers:stop_apps([]), + ok. + +t_messages_persisted(_Config) -> + C1 = connect(<>, true, 30), + C2 = connect(<>, false, 60), + C3 = connect(<>, false, undefined), + C4 = connect(<>, false, 0), + + CP = connect(<>, true, undefined), + + {ok, _, [1]} = emqtt:subscribe(C1, <<"client/+/topic">>, qos1), + {ok, _, [0]} = emqtt:subscribe(C2, <<"client/+/topic">>, qos0), + {ok, _, [1]} = emqtt:subscribe(C2, <<"random/+">>, qos1), + {ok, _, [2]} = emqtt:subscribe(C3, <<"client/#">>, qos2), + {ok, _, [0]} = emqtt:subscribe(C4, <<"random/#">>, qos0), + + Messages = [ + M1 = {<<"client/1/topic">>, <<"1">>}, + M2 = {<<"client/2/topic">>, <<"2">>}, + M3 = {<<"client/3/topic/sub">>, <<"3">>}, + M4 = {<<"client/4">>, <<"4">>}, + M5 = {<<"random/5">>, <<"5">>}, + M6 = {<<"random/6/topic">>, <<"6">>}, + M7 = {<<"client/7/topic">>, <<"7">>}, + M8 = {<<"client/8/topic/sub">>, <<"8">>}, + M9 = {<<"random/9">>, <<"9">>}, + M10 = {<<"random/10">>, <<"10">>} + ], + + Results = [emqtt:publish(CP, Topic, Payload, 1) || {Topic, Payload} <- Messages], + + ?HERE("Results = ~p", [Results]), + + Persisted = consume(<<"local">>, {['#'], 0}), + + ?HERE("Persisted = ~p", [Persisted]), + + ?assertEqual( + % [M1, M2, M5, M7, M9, M10], + [M1, M2, M3, M4, M5, M6, M7, M8, M9, M10], + [{emqx_message:topic(M), emqx_message:payload(M)} || M <- Persisted] + ), + + ok. + +%% + +connect(ClientId, CleanStart, EI) -> + {ok, Client} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {clean_start, CleanStart}, + {properties, + maps:from_list( + [{'Session-Expiry-Interval', EI} || is_integer(EI)] + )} + ]), + {ok, _} = emqtt:connect(Client), + Client. + +consume(Shard, Replay) -> + {ok, It} = emqx_ds_storage_layer:make_iterator(Shard, Replay), + consume(It). + +consume(It) -> + case emqx_ds_storage_layer:next(It) of + {value, Msg, NIt} -> + [emqx_persistent_session_ds:deserialize_message(Msg) | consume(NIt)]; + none -> + [] + end. From ac56de9fc57b591b44fdc9618d8100d7d4c339c8 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Thu, 6 Jul 2023 17:42:24 +0200 Subject: [PATCH 02/73] feat(ds): allow to redefine directory for shard's rocksdb --- .../src/emqx_ds_message_storage_bitmask.erl | 2 +- .../src/emqx_ds_storage_layer.erl | 40 +++++++++++-------- .../src/emqx_ds_storage_layer_sup.erl | 16 ++++---- .../test/emqx_ds_storage_layer_SUITE.erl | 4 +- 4 files changed, 35 insertions(+), 27 deletions(-) diff --git a/apps/emqx_durable_storage/src/emqx_ds_message_storage_bitmask.erl b/apps/emqx_durable_storage/src/emqx_ds_message_storage_bitmask.erl index 5bb0423d5..57608e5cb 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_message_storage_bitmask.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_message_storage_bitmask.erl @@ -175,7 +175,7 @@ cf :: rocksdb:cf_handle(), keymapper :: keymapper(), write_options = [{sync, true}] :: emqx_ds_storage_layer:db_write_options(), - read_options = [] :: emqx_ds_storage_layer:db_write_options() + read_options = [] :: emqx_ds_storage_layer:db_read_options() }). 
-record(it, { diff --git a/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl b/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl index 43a399a1b..017423b02 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl @@ -6,7 +6,7 @@ -behaviour(gen_server). %% API: --export([start_link/1]). +-export([start_link/2]). -export([create_generation/3]). -export([store/5]). @@ -18,7 +18,8 @@ %% behaviour callbacks: -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). --export_type([cf_refs/0, gen_id/0, db_write_options/0, state/0, iterator/0]). +-export_type([cf_refs/0, gen_id/0, options/0, state/0, iterator/0]). +-export_type([db_options/0, db_write_options/0, db_read_options/0]). -compile({inline, [meta_lookup/2]}). @@ -26,10 +27,16 @@ %% Type declarations %%================================================================================ -%% see rocksdb:db_options() -% -type options() :: proplists:proplist(). +-type options() :: #{ + dir => file:filename() +}. +%% see rocksdb:db_options() +-type db_options() :: proplists:proplist(). +%% see rocksdb:write_options() -type db_write_options() :: proplists:proplist(). +%% see rocksdb:read_options() +-type db_read_options() :: proplists:proplist(). -type cf_refs() :: [{string(), rocksdb:cf_handle()}]. @@ -110,18 +117,16 @@ %% API funcions %%================================================================================ --spec start_link(emqx_ds:shard()) -> {ok, pid()}. -start_link(Shard) -> - gen_server:start_link(?REF(Shard), ?MODULE, [Shard], []). +-spec start_link(emqx_ds:shard(), emqx_ds_storage_layer:options()) -> {ok, pid()}. +start_link(Shard, Options) -> + gen_server:start_link(?REF(Shard), ?MODULE, {Shard, Options}, []). -spec create_generation(emqx_ds:shard(), emqx_ds:time(), emqx_ds_conf:backend_config()) -> {ok, gen_id()} | {error, nonmonotonic}. create_generation(Shard, Since, Config = {_Module, _Options}) -> gen_server:call(?REF(Shard), {create_generation, Since, Config}). --spec store( - emqx_ds:shard(), emqx_guid:guid(), emqx_ds:time(), emqx_ds:topic(), binary() -) -> +-spec store(emqx_ds:shard(), emqx_guid:guid(), emqx_ds:time(), emqx_ds:topic(), binary()) -> ok | {error, _}. store(Shard, GUID, Time, Topic, Msg) -> {_GenId, #{module := Mod, data := Data}} = meta_lookup_gen(Shard, Time), @@ -181,9 +186,9 @@ discard_iterator(Shard, ReplayID) -> %% behaviour callbacks %%================================================================================ -init([Shard]) -> +init({Shard, Options}) -> process_flag(trap_exit, true), - {ok, S0} = open_db(Shard), + {ok, S0} = open_db(Shard, Options), S = ensure_current_generation(S0), ok = populate_metadata(S), {ok, S}. @@ -265,16 +270,17 @@ create_gen(GenId, Since, {Module, Options}, S = #s{db = DBHandle, cf_generations }, {ok, Gen, S#s{cf_generations = NewCFs ++ CFs}}. --spec open_db(emqx_ds:shard()) -> {ok, state()} | {error, _TODO}. -open_db(Shard) -> - Filename = binary_to_list(Shard), +-spec open_db(emqx_ds:shard(), options()) -> {ok, state()} | {error, _TODO}. 
+open_db(Shard, Options) -> + DBDir = unicode:characters_to_list(maps:get(dir, Options, Shard)), DBOptions = [ {create_if_missing, true}, {create_missing_column_families, true} | emqx_ds_conf:db_options() ], + _ = filelib:ensure_dir(DBDir), ExistingCFs = - case rocksdb:list_column_families(Filename, DBOptions) of + case rocksdb:list_column_families(DBDir, DBOptions) of {ok, CFs} -> [{Name, []} || Name <- CFs, Name /= ?DEFAULT_CF, Name /= ?ITERATOR_CF]; % DB is not present. First start @@ -286,7 +292,7 @@ open_db(Shard) -> {?ITERATOR_CF, ?ITERATOR_CF_OPTS} | ExistingCFs ], - case rocksdb:open(Filename, DBOptions, ColumnFamilies) of + case rocksdb:open(DBDir, DBOptions, ColumnFamilies) of {ok, DBHandle, [_CFDefault, CFIterator | CFRefs]} -> {CFNames, _} = lists:unzip(ExistingCFs), {ok, #s{ diff --git a/apps/emqx_durable_storage/src/emqx_ds_storage_layer_sup.erl b/apps/emqx_durable_storage/src/emqx_ds_storage_layer_sup.erl index ed745df5f..56c8c760a 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_storage_layer_sup.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_storage_layer_sup.erl @@ -6,7 +6,7 @@ -behaviour(supervisor). %% API: --export([start_link/0, start_shard/1, stop_shard/1]). +-export([start_link/0, start_shard/2, stop_shard/1]). %% behaviour callbacks: -export([init/1]). @@ -25,9 +25,10 @@ start_link() -> supervisor:start_link({local, ?SUP}, ?MODULE, []). --spec start_shard(emqx_ds:shard()) -> supervisor:startchild_ret(). -start_shard(Shard) -> - supervisor:start_child(?SUP, shard_child_spec(Shard)). +-spec start_shard(emqx_ds:shard(), emqx_ds_storage_layer:options()) -> + supervisor:startchild_ret(). +start_shard(Shard, Options) -> + supervisor:start_child(?SUP, shard_child_spec(Shard, Options)). -spec stop_shard(emqx_ds:shard()) -> ok | {error, _}. stop_shard(Shard) -> @@ -51,11 +52,12 @@ init([]) -> %% Internal functions %%================================================================================ --spec shard_child_spec(emqx_ds:shard()) -> supervisor:child_spec(). -shard_child_spec(Shard) -> +-spec shard_child_spec(emqx_ds:shard(), emqx_ds_storage_layer:options()) -> + supervisor:child_spec(). +shard_child_spec(Shard, Options) -> #{ id => Shard, - start => {emqx_ds_storage_layer, start_link, [Shard]}, + start => {emqx_ds_storage_layer, start_link, [Shard, Options]}, shutdown => 5_000, restart => permanent, type => worker diff --git a/apps/emqx_durable_storage/test/emqx_ds_storage_layer_SUITE.erl b/apps/emqx_durable_storage/test/emqx_ds_storage_layer_SUITE.erl index 46a1436bb..c5c227333 100644 --- a/apps/emqx_durable_storage/test/emqx_ds_storage_layer_SUITE.erl +++ b/apps/emqx_durable_storage/test/emqx_ds_storage_layer_SUITE.erl @@ -33,7 +33,7 @@ %% Smoke test for opening and reopening the database t_open(_Config) -> ok = emqx_ds_storage_layer_sup:stop_shard(?SHARD), - {ok, _} = emqx_ds_storage_layer_sup:start_shard(?SHARD). + {ok, _} = emqx_ds_storage_layer_sup:start_shard(?SHARD, #{}). %% Smoke test of store function t_store(_Config) -> @@ -263,7 +263,7 @@ end_per_suite(_Config) -> init_per_testcase(TC, Config) -> ok = set_shard_config(shard(TC), ?DEFAULT_CONFIG), - {ok, _} = emqx_ds_storage_layer_sup:start_shard(shard(TC)), + {ok, _} = emqx_ds_storage_layer_sup:start_shard(shard(TC), #{}), Config. 
end_per_testcase(TC, _Config) -> From daf4e86da113653c19ae7612865a8d1c73d6bff6 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Thu, 6 Jul 2023 17:43:12 +0200 Subject: [PATCH 03/73] feat(ds): add `ensure_shard/2` shortcut And use it in `emqx_persistent_session_ds:init()` backend initialization function. --- apps/emqx/src/emqx_persistent_session_ds.erl | 7 +++---- apps/emqx_durable_storage/src/emqx_ds.erl | 13 +++++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 3bfa82298..27b4f0950 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -39,10 +39,9 @@ init() -> ?WHEN_ENABLED( - begin - _ = emqx_ds_storage_layer_sup:start_shard(?DS_SHARD), - ok - end + ok = emqx_ds:ensure_shard(?DS_SHARD, #{ + dir => filename:join([emqx:data_dir(), ds, messages, ?DS_SHARD]) + }) ). %% diff --git a/apps/emqx_durable_storage/src/emqx_ds.erl b/apps/emqx_durable_storage/src/emqx_ds.erl index 230ca3f9f..9eccf8c16 100644 --- a/apps/emqx_durable_storage/src/emqx_ds.erl +++ b/apps/emqx_durable_storage/src/emqx_ds.erl @@ -16,6 +16,7 @@ -module(emqx_ds). %% API: +-export([ensure_shard/2]). %% Messages: -export([message_store/2, message_store/1, message_stats/0]). %% Iterator: @@ -79,6 +80,18 @@ %% API funcions %%================================================================================ +-spec ensure_shard(shard(), emqx_ds_storage_layer:options()) -> + ok | {error, _Reason}. +ensure_shard(Shard, Options) -> + case emqx_ds_storage_layer_sup:start_shard(Shard, Options) of + {ok, _Pid} -> + ok; + {error, {already_started, _Pid}} -> + ok; + {error, Reason} -> + {error, Reason} + end. + %%-------------------------------------------------------------------------------- %% Message %%-------------------------------------------------------------------------------- From 030beb3e6a6c9ce7763fd4c96b3960ee41a05738 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Thu, 6 Jul 2023 17:47:26 +0200 Subject: [PATCH 04/73] fix(ds): drop debug printouts in test suite --- apps/emqx/test/emqx_persistent_messages_SUITE.erl | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/apps/emqx/test/emqx_persistent_messages_SUITE.erl b/apps/emqx/test/emqx_persistent_messages_SUITE.erl index 2a8db4a9b..845765778 100644 --- a/apps/emqx/test/emqx_persistent_messages_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_messages_SUITE.erl @@ -25,14 +25,6 @@ (calendar:system_time_to_rfc3339(erlang:system_time(millisecond), [{unit, millisecond}])) ). --define(HERE(FMT, ARGS), - io:format( - user, - "*** " ?MODULE_STRING ":~p/~p ~s @ ~p *** " ++ FMT ++ "~n", - [?FUNCTION_NAME, ?FUNCTION_ARITY, ?NOW, node() | ARGS] - ) -). - all() -> [t_messages_persisted]. 
@@ -81,11 +73,11 @@ t_messages_persisted(_Config) -> Results = [emqtt:publish(CP, Topic, Payload, 1) || {Topic, Payload} <- Messages], - ?HERE("Results = ~p", [Results]), + ct:pal("Results = ~p", [Results]), Persisted = consume(<<"local">>, {['#'], 0}), - ?HERE("Persisted = ~p", [Persisted]), + ct:pal("Persisted = ~p", [Persisted]), ?assertEqual( % [M1, M2, M5, M7, M9, M10], From 8c883feb546310c147f54cbf2f4cef2e2db700a3 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Fri, 7 Jul 2023 13:47:03 +0200 Subject: [PATCH 05/73] chore: bump `emqx_durable_storage` to 0.1.1 --- apps/emqx_durable_storage/src/emqx_durable_storage.app.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx_durable_storage/src/emqx_durable_storage.app.src b/apps/emqx_durable_storage/src/emqx_durable_storage.app.src index 7ea036536..944477306 100644 --- a/apps/emqx_durable_storage/src/emqx_durable_storage.app.src +++ b/apps/emqx_durable_storage/src/emqx_durable_storage.app.src @@ -2,7 +2,7 @@ {application, emqx_durable_storage, [ {description, "Message persistence and subscription replays for EMQX"}, % strict semver, bump manually! - {vsn, "0.1.0"}, + {vsn, "0.1.1"}, {modules, []}, {registered, []}, {applications, [kernel, stdlib, rocksdb, gproc, mria]}, From a43bf5e4fa4b4797fe1fa1b2f8650ca73a25b9f7 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Thu, 22 Jun 2023 14:45:17 +0200 Subject: [PATCH 06/73] ci: release Amazon Linux 2023 packages --- .github/workflows/build_and_push_docker_images.yaml | 4 ++-- .github/workflows/build_packages.yaml | 9 +++++---- .github/workflows/build_packages_cron.yaml | 4 ++-- .github/workflows/build_slim_packages.yaml | 4 ++-- .github/workflows/check_deps_integrity.yaml | 2 +- .github/workflows/code_style_check.yaml | 2 +- .github/workflows/elixir_apps_check.yaml | 2 +- .github/workflows/elixir_deps_check.yaml | 2 +- .github/workflows/elixir_release.yml | 2 +- .github/workflows/performance_test.yaml | 2 +- .github/workflows/release.yaml | 6 ++++-- .github/workflows/run_conf_tests.yaml | 2 +- .github/workflows/run_emqx_app_tests.yaml | 2 +- .github/workflows/run_fvt_tests.yaml | 6 +++--- .github/workflows/run_relup_tests.yaml | 2 +- .github/workflows/run_test_cases.yaml | 6 +++--- 16 files changed, 30 insertions(+), 27 deletions(-) diff --git a/.github/workflows/build_and_push_docker_images.yaml b/.github/workflows/build_and_push_docker_images.yaml index 3ee9b79c7..3dd00a9d7 100644 --- a/.github/workflows/build_and_push_docker_images.yaml +++ b/.github/workflows/build_and_push_docker_images.yaml @@ -25,7 +25,7 @@ jobs: prepare: runs-on: ubuntu-22.04 # prepare source with any OTP version, no need for a matrix - container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" outputs: PROFILE: ${{ steps.get_profile.outputs.PROFILE }} @@ -120,7 +120,7 @@ jobs: # NOTE: 'otp' and 'elixir' are to configure emqx-builder image # only support latest otp and elixir, not a matrix builder: - - 5.1-0 # update to latest + - 5.1-1 # update to latest otp: - 25.3.2-1 elixir: diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index e9071db98..8af46df09 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -21,7 +21,7 @@ on: jobs: prepare: runs-on: ubuntu-22.04 - container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04 + container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04 outputs: 
BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }} IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }} @@ -187,11 +187,12 @@ jobs: - el8 - el7 - amzn2 + - amzn2023 build_machine: - aws-arm64 - ubuntu-22.04 builder: - - 5.1-0 + - 5.1-1 elixir: - 1.14.5 with_elixir: @@ -207,7 +208,7 @@ jobs: arch: amd64 os: ubuntu22.04 build_machine: ubuntu-22.04 - builder: 5.1-0 + builder: 5.1-1 elixir: 1.14.5 with_elixir: 'yes' - profile: emqx @@ -215,7 +216,7 @@ jobs: arch: amd64 os: amzn2 build_machine: ubuntu-22.04 - builder: 5.1-0 + builder: 5.1-1 elixir: 1.14.5 with_elixir: 'yes' diff --git a/.github/workflows/build_packages_cron.yaml b/.github/workflows/build_packages_cron.yaml index 8efe249b8..3f8e728e0 100644 --- a/.github/workflows/build_packages_cron.yaml +++ b/.github/workflows/build_packages_cron.yaml @@ -30,9 +30,9 @@ jobs: - amd64 os: - debian10 - - amzn2 + - amzn2023 builder: - - 5.1-0 + - 5.1-1 elixir: - 1.14.5 diff --git a/.github/workflows/build_slim_packages.yaml b/.github/workflows/build_slim_packages.yaml index 29e7bb13a..50bb83451 100644 --- a/.github/workflows/build_slim_packages.yaml +++ b/.github/workflows/build_slim_packages.yaml @@ -32,10 +32,10 @@ jobs: profile: - ["emqx", "25.3.2-1", "el7", "erlang"] - ["emqx", "25.3.2-1", "ubuntu22.04", "elixir"] - - ["emqx-enterprise", "25.3.2-1", "amzn2", "erlang"] + - ["emqx-enterprise", "25.3.2-1", "amzn2023", "erlang"] - ["emqx-enterprise", "25.3.2-1", "ubuntu20.04", "erlang"] builder: - - 5.1-0 + - 5.1-1 elixir: - '1.14.5' diff --git a/.github/workflows/check_deps_integrity.yaml b/.github/workflows/check_deps_integrity.yaml index 199a49f21..02b43d16d 100644 --- a/.github/workflows/check_deps_integrity.yaml +++ b/.github/workflows/check_deps_integrity.yaml @@ -6,7 +6,7 @@ on: jobs: check_deps_integrity: runs-on: ubuntu-22.04 - container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04 + container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04 steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/code_style_check.yaml b/.github/workflows/code_style_check.yaml index e94f2de17..13046b255 100644 --- a/.github/workflows/code_style_check.yaml +++ b/.github/workflows/code_style_check.yaml @@ -5,7 +5,7 @@ on: [pull_request] jobs: code_style_check: runs-on: ubuntu-22.04 - container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" steps: - uses: actions/checkout@v3 with: diff --git a/.github/workflows/elixir_apps_check.yaml b/.github/workflows/elixir_apps_check.yaml index 8e29181f5..840311328 100644 --- a/.github/workflows/elixir_apps_check.yaml +++ b/.github/workflows/elixir_apps_check.yaml @@ -9,7 +9,7 @@ jobs: elixir_apps_check: runs-on: ubuntu-22.04 # just use the latest builder - container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" strategy: fail-fast: false diff --git a/.github/workflows/elixir_deps_check.yaml b/.github/workflows/elixir_deps_check.yaml index b8364c6da..aa5e3d367 100644 --- a/.github/workflows/elixir_deps_check.yaml +++ b/.github/workflows/elixir_deps_check.yaml @@ -8,7 +8,7 @@ on: jobs: elixir_deps_check: runs-on: ubuntu-22.04 - container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04 + container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04 steps: - name: Checkout diff --git a/.github/workflows/elixir_release.yml b/.github/workflows/elixir_release.yml index 
af4805d06..8e95c6746 100644 --- a/.github/workflows/elixir_release.yml +++ b/.github/workflows/elixir_release.yml @@ -17,7 +17,7 @@ jobs: profile: - emqx - emqx-enterprise - container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04 + container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04 steps: - name: Checkout uses: actions/checkout@v3 diff --git a/.github/workflows/performance_test.yaml b/.github/workflows/performance_test.yaml index 1fbc25267..73e9dc91f 100644 --- a/.github/workflows/performance_test.yaml +++ b/.github/workflows/performance_test.yaml @@ -23,7 +23,7 @@ jobs: prepare: runs-on: ubuntu-latest if: github.repository_owner == 'emqx' - container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04 + container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu20.04 outputs: BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }} PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 586142bbe..c3455e81b 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -93,14 +93,16 @@ jobs: push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-arm64.deb" push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb" push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-arm64.deb" - push "el/6" "packages/$PROFILE-$VERSION-amzn2-amd64.rpm" - push "el/6" "packages/$PROFILE-$VERSION-amzn2-arm64.rpm" push "el/7" "packages/$PROFILE-$VERSION-el7-amd64.rpm" push "el/7" "packages/$PROFILE-$VERSION-el7-arm64.rpm" push "el/8" "packages/$PROFILE-$VERSION-el8-amd64.rpm" push "el/8" "packages/$PROFILE-$VERSION-el8-arm64.rpm" push "el/9" "packages/$PROFILE-$VERSION-el9-amd64.rpm" push "el/9" "packages/$PROFILE-$VERSION-el9-arm64.rpm" + push "amazon/2" "packages/$PROFILE-$VERSION-amzn2-amd64.rpm" + push "amazon/2" "packages/$PROFILE-$VERSION-amzn2-arm64.rpm" + push "amazon/2023" "packages/$PROFILE-$VERSION-amzn2023-amd64.rpm" + push "amazon/2023" "packages/$PROFILE-$VERSION-amzn2023-arm64.rpm" rerun-apps-version-check: runs-on: ubuntu-22.04 diff --git a/.github/workflows/run_conf_tests.yaml b/.github/workflows/run_conf_tests.yaml index 3e7c39ae9..91b82329b 100644 --- a/.github/workflows/run_conf_tests.yaml +++ b/.github/workflows/run_conf_tests.yaml @@ -26,7 +26,7 @@ jobs: profile: - emqx - emqx-enterprise - container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" steps: - uses: AutoModality/action-clean@v1 - uses: actions/checkout@v3 diff --git a/.github/workflows/run_emqx_app_tests.yaml b/.github/workflows/run_emqx_app_tests.yaml index 3ac939519..ddb29122f 100644 --- a/.github/workflows/run_emqx_app_tests.yaml +++ b/.github/workflows/run_emqx_app_tests.yaml @@ -12,7 +12,7 @@ jobs: strategy: matrix: builder: - - 5.1-0 + - 5.1-1 otp: - 25.3.2-1 # no need to use more than 1 version of Elixir, since tests diff --git a/.github/workflows/run_fvt_tests.yaml b/.github/workflows/run_fvt_tests.yaml index 50d999851..64ab63042 100644 --- a/.github/workflows/run_fvt_tests.yaml +++ b/.github/workflows/run_fvt_tests.yaml @@ -17,7 +17,7 @@ jobs: prepare: runs-on: ubuntu-22.04 # prepare source with any OTP version, no need for a matrix - container: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11 + container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-debian11 steps: - uses: actions/checkout@v3 @@ -50,7 +50,7 @@ jobs: os: - ["debian11", 
"debian:11-slim"] builder: - - 5.1-0 + - 5.1-1 otp: - 25.3.2-1 elixir: @@ -123,7 +123,7 @@ jobs: os: - ["debian11", "debian:11-slim"] builder: - - 5.1-0 + - 5.1-1 otp: - 25.3.2-1 elixir: diff --git a/.github/workflows/run_relup_tests.yaml b/.github/workflows/run_relup_tests.yaml index 4b33255c6..75725e2e2 100644 --- a/.github/workflows/run_relup_tests.yaml +++ b/.github/workflows/run_relup_tests.yaml @@ -15,7 +15,7 @@ concurrency: jobs: relup_test_plan: runs-on: ubuntu-22.04 - container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" outputs: CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }} OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }} diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index 85b38627f..1b0df7d42 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -34,12 +34,12 @@ jobs: MATRIX="$(echo "${APPS}" | jq -c ' [ (.[] | select(.profile == "emqx") | . + { - builder: "5.1-0", + builder: "5.1-1", otp: "25.3.2-1", elixir: "1.14.5" }), (.[] | select(.profile == "emqx-enterprise") | . + { - builder: "5.1-0", + builder: "5.1-1", otp: ["25.3.2-1"][], elixir: "1.14.5" }) @@ -286,7 +286,7 @@ jobs: - ct - ct_docker runs-on: ubuntu-22.04 - container: "ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" steps: - uses: AutoModality/action-clean@v1 - uses: actions/download-artifact@v3 From 4e9c39aed284a93a97ebebab90abdbf8feaf1af0 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Thu, 22 Jun 2023 14:48:10 +0200 Subject: [PATCH 07/73] chore: add changelog --- changes/ce/feat-11124.en.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/ce/feat-11124.en.md diff --git a/changes/ce/feat-11124.en.md b/changes/ce/feat-11124.en.md new file mode 100644 index 000000000..80c84e849 --- /dev/null +++ b/changes/ce/feat-11124.en.md @@ -0,0 +1 @@ +Release packages for Amazon Linux 2023 From 7f04fff2a8216b222bf4704e662c6d9b47ddb833 Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Mon, 10 Jul 2023 17:34:34 +0300 Subject: [PATCH 08/73] fix(emqx_machine): add emqx_ee_schema_registry to the reboot apps list As emqx_ee_schema_registry uses Mria tables (schema_registry_shard), a node joining a cluster needs to restart this application in order to restart relevant Mria shard processes. --- apps/emqx_machine/src/emqx_machine_boot.erl | 3 ++- changes/ee/fix-11242.en.md | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 changes/ee/fix-11242.en.md diff --git a/apps/emqx_machine/src/emqx_machine_boot.erl b/apps/emqx_machine/src/emqx_machine_boot.erl index b929f0d72..eb1739fd3 100644 --- a/apps/emqx_machine/src/emqx_machine_boot.erl +++ b/apps/emqx_machine/src/emqx_machine_boot.erl @@ -157,7 +157,8 @@ basic_reboot_apps_edition(ee) -> emqx_s3, emqx_ft, emqx_eviction_agent, - emqx_node_rebalance + emqx_node_rebalance, + emqx_ee_schema_registry ]; %% unexcepted edition, should not happen basic_reboot_apps_edition(_) -> diff --git a/changes/ee/fix-11242.en.md b/changes/ee/fix-11242.en.md new file mode 100644 index 000000000..14ff87bd8 --- /dev/null +++ b/changes/ee/fix-11242.en.md @@ -0,0 +1,5 @@ +Restart emqx_ee_schema_registry when a node joins a cluster. 
+ +As emqx_ee_schema_registry uses Mria tables, a node joining a cluster needs to restart this application in order to +start relevant Mria shard processes. +This is needed to ensure a correct behaviour in Core/Replicant mode. From d91ab7dec92e12f97b368e8eeb744fea3968a1cd Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Thu, 6 Jul 2023 18:53:04 +0200 Subject: [PATCH 09/73] test(ds): make `emqx_persistent_messages_SUITE` setup cleaner Co-authored-by: Thales Macedo Garitezi --- apps/emqx/test/emqx_persistent_messages_SUITE.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apps/emqx/test/emqx_persistent_messages_SUITE.erl b/apps/emqx/test/emqx_persistent_messages_SUITE.erl index 845765778..b818e3fec 100644 --- a/apps/emqx/test/emqx_persistent_messages_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_messages_SUITE.erl @@ -26,7 +26,7 @@ ). all() -> - [t_messages_persisted]. + emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> {ok, _} = application:ensure_all_started(emqx_durable_storage), @@ -42,6 +42,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([]), + application:stop(emqx_durable_storage), ok. t_messages_persisted(_Config) -> From d97d5b8af7cea8de4d3f3ce8cc91f9cba750fd3b Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 10 Jul 2023 14:31:25 -0300 Subject: [PATCH 10/73] ci(machine_boot): add ci check for missing reboot apps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to avoid forgetting to add an application to `emqx_machine_boot:sorted_reboot_apps`, this script checks for any calls to `mria:create_table` in all EMQX applications and checks it against said function in `emqx_machine_boot`. Example run: ``` ͳ scripts/check_missing_reboot_apps.exs --profile emqx-enterprise Some applications are missing from `emqx_machine_boot:sorted_reboot_apps/0`! Missing applications: * emqx_durable_storage * emqx_ee_schema_registry Hint: maybe add them to `emqx_machine_boot:basic_reboot_apps_edition/1` Applications that call `mria:create_table` need to be added to that list; otherwise, when a node joins a cluster, it might lose tables. 
``` Example problem: https://github.com/emqx/emqx/pull/11242 --- Makefile | 1 + scripts/check_missing_reboot_apps.exs | 62 +++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100755 scripts/check_missing_reboot_apps.exs diff --git a/Makefile b/Makefile index 8c5eb3048..df3d95f62 100644 --- a/Makefile +++ b/Makefile @@ -99,6 +99,7 @@ static_checks: @$(REBAR) as check do xref, dialyzer @if [ "$${PROFILE}" = 'emqx-enterprise' ]; then $(REBAR) ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE); fi ./scripts/check-i18n-style.sh + ./scripts/check_missing_reboot_apps.exs --profile $(PROFILE) APPS=$(shell $(SCRIPTS)/find-apps.sh) diff --git a/scripts/check_missing_reboot_apps.exs b/scripts/check_missing_reboot_apps.exs new file mode 100755 index 000000000..9ea2c7925 --- /dev/null +++ b/scripts/check_missing_reboot_apps.exs @@ -0,0 +1,62 @@ +#!/usr/bin/env elixir + +{parsed, _argv, _errors = []} = + OptionParser.parse( + System.argv(), + strict: [profile: :string] + ) + +profile = Keyword.fetch!(parsed, :profile) + +:xref.start(:xref) +:xref.set_default(:xref, warnings: false) +rel_dir = '_build/#{profile}/lib/' +:xref.add_release(:xref, rel_dir) + +{:ok, calls} = :xref.q(:xref, '(App) (XC || "mria":"create_table"/".*")') + +emqx_calls = + calls + |> Enum.map(&elem(&1, 0)) + |> Enum.filter(&(to_string(&1) =~ "emqx_")) + |> MapSet.new() + +Path.wildcard(rel_dir ++ "*/ebin") +|> Enum.each(fn dir -> + dir + |> to_charlist() + |> :code.add_pathz() +end) + +Path.wildcard(rel_dir ++ "*") +|> Enum.map(fn dir -> + dir + |> Path.basename() + |> String.to_atom() + |> Application.load() +end) + +reboot_apps = :emqx_machine_boot.sorted_reboot_apps() |> MapSet.new() + +missing_reboot_apps = MapSet.difference(emqx_calls, reboot_apps) + +if MapSet.size(missing_reboot_apps) != 0 do + IO.puts( + :stderr, + IO.ANSI.format([ + :red, + "Some applications are missing from `emqx_machine_boot:sorted_reboot_apps/0`!\n", + "Missing applications:\n", + Enum.map(missing_reboot_apps, fn app -> + " * #{app}\n" + end), + "\n", + :green, + "Hint: maybe add them to `emqx_machine_boot:basic_reboot_apps_edition/1`\n", + "\n", + :yellow, + "Applications that call `mria:create_table` need to be added to that list;\n", + " otherwise, when a node joins a cluster, it might lose tables.\n" + ]) + ) +end From a53768c1d4627083a6d2ec993b0d712d3f094469 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 10 Jul 2023 14:39:44 -0300 Subject: [PATCH 11/73] fix(machine_boot): add `emqx_durable_storage` to reboot apps list --- apps/emqx_machine/src/emqx_machine_boot.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apps/emqx_machine/src/emqx_machine_boot.erl b/apps/emqx_machine/src/emqx_machine_boot.erl index b929f0d72..de3c67207 100644 --- a/apps/emqx_machine/src/emqx_machine_boot.erl +++ b/apps/emqx_machine/src/emqx_machine_boot.erl @@ -146,7 +146,8 @@ basic_reboot_apps() -> emqx_slow_subs, emqx_auto_subscribe, emqx_plugins, - emqx_psk + emqx_psk, + emqx_durable_storage ] ++ basic_reboot_apps_edition(emqx_release:edition()). 
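%% Note for maintainers: every application that creates Mria tables must be
%% listed in basic_reboot_apps/0 (or the edition-specific list below), since
%% these applications are restarted when a node joins a cluster and a missing
%% entry can lead to lost tables. The `check_missing_reboot_apps.exs` script
%% introduced earlier in this series enforces this in CI.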
basic_reboot_apps_edition(ce) -> From ec47be912dc1ac0fbac9210696563d170de964a8 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Mon, 10 Jul 2023 20:53:22 +0200 Subject: [PATCH 12/73] chore(ds): add review-board as `emqx_durable_storage` codeowners --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 3d6ab6c37..e6d98c2e1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -18,7 +18,7 @@ /apps/emqx_rule_engine/ @emqx/emqx-review-board @kjellwinblad /apps/emqx_slow_subs/ @emqx/emqx-review-board @lafirest /apps/emqx_statsd/ @emqx/emqx-review-board @JimMoen -/apps/emqx_durable_storage/ @ieQu1 +/apps/emqx_durable_storage/ @emqx/emqx-review-board @ieQu1 ## CI /deploy/ @emqx/emqx-review-board @Rory-Z From 5f731d1e2999ecb15b5cac1ea83eaf21e15eee6b Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Mon, 10 Jul 2023 20:57:07 +0200 Subject: [PATCH 13/73] chore: add @keynslug as `emqx_durable_storage` codeowner Co-authored-by: Thales Macedo Garitezi --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e6d98c2e1..e42000489 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -18,7 +18,7 @@ /apps/emqx_rule_engine/ @emqx/emqx-review-board @kjellwinblad /apps/emqx_slow_subs/ @emqx/emqx-review-board @lafirest /apps/emqx_statsd/ @emqx/emqx-review-board @JimMoen -/apps/emqx_durable_storage/ @emqx/emqx-review-board @ieQu1 +/apps/emqx_durable_storage/ @emqx/emqx-review-board @ieQu1 @keynslug ## CI /deploy/ @emqx/emqx-review-board @Rory-Z From 3fa885327197e2f3abe0bdab629081116726d5cc Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 10 Jul 2023 18:00:01 -0300 Subject: [PATCH 14/73] ci(dev): fix `--ekka-epmd` flag for elixir --- dev | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dev b/dev index 897aed5de..1db84f7c2 100755 --- a/dev +++ b/dev @@ -373,15 +373,15 @@ boot() { {:ok, _} = Application.ensure_all_started(:emqx_machine) ' if [ -n "${EPMD_ARGS:-}" ]; then - EPMD_ARGS_ELIXIR="--erl $EPMD_ARGS" + EPMD_ARGS_ELIXIR="$EPMD_ARGS" else - EPMD_ARGS_ELIXIR="" + EPMD_ARGS_ELIXIR="-no_op true" fi # shellcheck disable=SC2086 env APPS="$APPS" iex \ --name "$EMQX_NODE_NAME" \ - $EPMD_ARGS_ELIXIR \ + --erl "$EPMD_ARGS_ELIXIR" \ --erl '-user Elixir.IEx.CLI' \ --erl '-proto_dist ekka' \ --vm-args "$ARGS_FILE" \ From ce65abefc35b61e235d4b1c7b8e14efeafc5bff7 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Wed, 12 Jul 2023 10:45:53 +0200 Subject: [PATCH 15/73] docs: add security policy --- SECURITY.md | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..2274f569b --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,40 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 5.1.x | :white_check_mark: | +| 5.0.x | :white_check_mark: | +| 4.4.x | :white_check_mark: | +| < 4.4 | :x: | + +## Qualifying Vulnerabilities + +Any design or implementation issue that substantially affects the confidentiality or integrity of user data is likely to be in scope for the program. 
Common examples including: + +* Cross-site scripting +* Cross-site request forgery +* Mixed-content scripts +* Authentication or authorization flaws +* Server-side code execution bugs + +Out of concern for the availability of our services to all users, please do not attempt to carry out DoS attacks, leverage black hat SEO techniques, spam people, brute force authentication, or do other similarly questionable things. We also discourage the use of any vulnerability testing tools that automatically generate very significant volumes of traffic. + +## Non-qualifying Vulnerabilities + +Depending on their impacts, some of the reported issues may not qualify. +Although we review them on a case-by-case basis, here are some of the issues that typically do not earn a monetary reward: + +* Bugs requiring exceedingly unlikely user interaction Brute forcing +* User enumeration +* Non security related bugs +* Abuse + +## Reporting a Vulnerability + +1. When investigating a vulnerability, please, only ever target your own accounts. Never attempt to access anyone else's data and do not engage in any activity that would be disruptive or damaging to other users. +2. In the case the same vulnerability is present on multiple products, please combine and send one report. +3. If you have found a vulnerability, please contact us at security@emqx.io. +4. Note that we are only able to answer technical vulnerability reports. Duplicate reports will not be rewarded, first report on the specific vulnerability will be rewarded. +5. The report should include steps in plain text how to reproduce the vulnerability (not only video or images). From be7918aa4112cc15e7b27f52fa38cf6a44dc9a45 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Wed, 12 Jul 2023 16:09:23 -0300 Subject: [PATCH 16/73] fix(gcp_pubsub_consumer): fail health check when there are no workers `ecpool` already returns an error even if the worker process is dead, but we add the empty worker list clause here just for completeness. --- .../emqx_bridge_gcp_pubsub_impl_consumer.erl | 2 ++ .../emqx_bridge_gcp_pubsub_consumer_SUITE.erl | 20 +++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_consumer.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_consumer.erl index e04794bfb..8f67d2678 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_consumer.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_consumer.erl @@ -249,6 +249,8 @@ check_workers(InstanceId, Client) -> #{return_values => true} ) of + {ok, []} -> + connecting; {ok, Values} -> AllOk = lists:all(fun(S) -> S =:= subscription_ok end, Values), case AllOk of diff --git a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl index 2a04b5ee1..4828e1730 100644 --- a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl @@ -1004,7 +1004,27 @@ t_bridge_rule_action_source(Config) -> ok. 
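%% The lines added below first wait for the bridge to report `connected`,
%% then kill every pull worker and expect the health check to drop back to
%% `connecting` (per the commit message, ecpool already errors out for dead
%% workers; the new `{ok, []}` clause in check_workers/2 covers the empty
%% worker list case).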
t_on_get_status(Config) -> + ResourceId = resource_id(Config), emqx_bridge_testlib:t_on_get_status(Config, #{failure_status => connecting}), + %% no workers alive + ?retry( + _Interval0 = 200, + _NAttempts0 = 20, + ?assertMatch({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + WorkerPids = get_pull_worker_pids(Config), + emqx_utils:pmap( + fun(Pid) -> + Ref = monitor(process, Pid), + exit(Pid, kill), + receive + {'DOWN', Ref, process, Pid, killed} -> + ok + end + end, + WorkerPids + ), + ?assertMatch({ok, connecting}, emqx_resource_manager:health_check(ResourceId)), ok. t_create_via_http_api(_Config) -> From 26d4ee5780a33c9138bcb86eddbc11cbef3c139b Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 11 Jul 2023 20:07:16 -0300 Subject: [PATCH 17/73] ci(fix): actually fail check for missing reboot apps --- Makefile | 2 +- mix.exs | 21 ++++++++++++++++++- scripts/check_missing_reboot_apps.exs | 30 ++++++++++++++++++++------- 3 files changed, 44 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index fe533b92d..bb2694d5d 100644 --- a/Makefile +++ b/Makefile @@ -99,7 +99,7 @@ static_checks: @$(REBAR) as check do xref, dialyzer @if [ "$${PROFILE}" = 'emqx-enterprise' ]; then $(REBAR) ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE); fi ./scripts/check-i18n-style.sh - ./scripts/check_missing_reboot_apps.exs --profile $(PROFILE) + ./scripts/check_missing_reboot_apps.exs APPS=$(shell $(SCRIPTS)/find-apps.sh) diff --git a/mix.exs b/mix.exs index 9305b2d57..548e32d36 100644 --- a/mix.exs +++ b/mix.exs @@ -446,13 +446,32 @@ defmodule EMQXUmbrella.MixProject do def check_profile!() do valid_envs = [ - :dev, :emqx, :"emqx-pkg", :"emqx-enterprise", :"emqx-enterprise-pkg" ] + if Mix.env() == :dev do + env_profile = System.get_env("PROFILE") + + if env_profile do + # copy from PROFILE env var + System.get_env("PROFILE") + |> String.to_atom() + |> Mix.env() + else + IO.puts( + IO.ANSI.format([ + :yellow, + "Warning: env var PROFILE is unset; defaulting to emqx" + ]) + ) + + Mix.env(:emqx) + end + end + if Mix.env() not in valid_envs do formatted_envs = valid_envs diff --git a/scripts/check_missing_reboot_apps.exs b/scripts/check_missing_reboot_apps.exs index 9ea2c7925..d9933e099 100755 --- a/scripts/check_missing_reboot_apps.exs +++ b/scripts/check_missing_reboot_apps.exs @@ -1,19 +1,33 @@ #!/usr/bin/env elixir -{parsed, _argv, _errors = []} = - OptionParser.parse( - System.argv(), - strict: [profile: :string] - ) +alias EMQXUmbrella.MixProject -profile = Keyword.fetch!(parsed, :profile) +{:ok, _} = Application.ensure_all_started(:mix) +# note: run from the project root +File.cwd!() +|> Path.join("mix.exs") +|> Code.compile_file() + +inputs = MixProject.check_profile!() +profile = Mix.env() + +# need to use this information because we might have compiled all +# applications in the test profile, and thus filter what's in the +# release lib directory. 
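# Only emqx applications that belong to the selected release profile are kept
# (":emqx_mix" is excluded); the xref query further down is then limited to
# calls from this application set to mria:create_table.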
+rel_apps = MixProject.applications(inputs.edition_type) + +apps = + rel_apps + |> Keyword.keys() + |> Enum.filter(&(to_string(&1) =~ "emqx")) + |> Enum.reject(&(&1 in [:emqx_mix])) :xref.start(:xref) :xref.set_default(:xref, warnings: false) rel_dir = '_build/#{profile}/lib/' :xref.add_release(:xref, rel_dir) -{:ok, calls} = :xref.q(:xref, '(App) (XC || "mria":"create_table"/".*")') +{:ok, calls} = :xref.q(:xref, '(App) (XC | [#{Enum.join(apps, ",")}] || mria:create_table/_)') emqx_calls = calls @@ -59,4 +73,6 @@ if MapSet.size(missing_reboot_apps) != 0 do " otherwise, when a node joins a cluster, it might lose tables.\n" ]) ) + + System.halt(1) end From 01b143c5ad0508e0352951d0135d4b8e00b729f2 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Thu, 13 Jul 2023 16:00:25 -0300 Subject: [PATCH 18/73] fix(resource): don't destruct error tuple Otherwise, `emqx_resource:query` won't correctly deem the resource to be unhealthy when there's an extra message. --- apps/emqx_bridge/src/emqx_bridge_api.erl | 7 ++++++- .../test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl | 2 +- apps/emqx_resource/src/emqx_resource_manager.erl | 1 - 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index a71315a27..7056e6059 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -546,7 +546,12 @@ schema("/bridges_probe") -> ?NO_CONTENT; {error, #{kind := validation_error} = Reason} -> ?BAD_REQUEST('TEST_FAILED', map_to_json(Reason)); - {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' -> + {error, Reason0} when not is_tuple(Reason0); element(1, Reason0) =/= 'exit' -> + Reason = + case Reason0 of + {unhealthy_target, Message} -> Message; + _ -> Reason0 + end, ?BAD_REQUEST('TEST_FAILED', Reason) end; BadRequest -> diff --git a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl index 4828e1730..e2ea48e1c 100644 --- a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl @@ -1211,7 +1211,7 @@ t_nonexistent_topic(Config) -> emqx_resource_manager:health_check(ResourceId) ), ?assertMatch( - {ok, _Group, #{error := "GCP PubSub topics are invalid" ++ _}}, + {ok, _Group, #{error := {unhealthy_target, "GCP PubSub topics are invalid" ++ _}}}, emqx_resource_manager:lookup_cached(ResourceId) ), %% now create the topic and restart the bridge diff --git a/apps/emqx_resource/src/emqx_resource_manager.erl b/apps/emqx_resource/src/emqx_resource_manager.erl index 195e9b3a7..2e4822a2f 100644 --- a/apps/emqx_resource/src/emqx_resource_manager.erl +++ b/apps/emqx_resource/src/emqx_resource_manager.erl @@ -642,7 +642,6 @@ status_to_error(_) -> {error, undefined}. %% Compatibility -external_error({error, {unhealthy_target, Message}}) -> Message; external_error({error, Reason}) -> Reason; external_error(Other) -> Other. 
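The `fix(resource): don't destruct error tuple` patch above keeps the `{unhealthy_target, Message}` tuple intact inside the resource manager, so that callers such as `emqx_resource:query` can still recognize the unhealthy state, and unwraps it only when the HTTP API renders the probe failure. A condensed sketch of the resulting flow, pieced together from the two hunks (the `render_probe_error/1` wrapper is an illustrative name; in the patch the equivalent `case` expression is inline in `emqx_bridge_api`):

```erlang
%% emqx_resource_manager: report the reason as-is, keeping the
%% unhealthy_target marker visible to callers.
external_error({error, Reason}) -> Reason;
external_error(Other) -> Other.

%% emqx_bridge_api: reduce the tuple to its human-readable message only when
%% building the 400 TEST_FAILED response (illustrative wrapper).
render_probe_error({unhealthy_target, Message}) -> Message;
render_probe_error(Reason) -> Reason.
```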
From 0c448a7546d8c540944ff34bc9225dde975cd863 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Fri, 14 Jul 2023 10:16:07 +0200 Subject: [PATCH 19/73] ci: add codeql workflow --- .github/workflows/codeql.yaml | 61 +++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 .github/workflows/codeql.yaml diff --git a/.github/workflows/codeql.yaml b/.github/workflows/codeql.yaml new file mode 100644 index 000000000..6d4cc3dc4 --- /dev/null +++ b/.github/workflows/codeql.yaml @@ -0,0 +1,61 @@ +name: "CodeQL" + +on: + schedule: + - cron: '33 14 * * 4' + workflow_dispatch: + inputs: + ref: + required: false + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + timeout-minutes: 360 + permissions: + actions: read + contents: read + security-events: write + container: + image: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04 + + strategy: + fail-fast: false + matrix: + language: [ 'cpp', 'python' ] + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + ref: ${{ github.event.inputs.ref }} + + - name: Ensure git safe dir + run: | + git config --global --add safe.directory "$GITHUB_WORKSPACE" + make ensure-rebar3 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + + - name: Build + if: matrix.language == 'cpp' + env: + PROFILE: emqx-enterprise + run: | + make emqx-enterprise-compile + + - name: Fetch deps + if: matrix.language == 'python' + env: + PROFILE: emqx-enterprise + run: | + make deps-emqx-enterprise + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + with: + category: "/language:${{matrix.language}}" From 950d5edc4143532977e785b6db5626126de38e33 Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Fri, 14 Jul 2023 14:22:43 +0300 Subject: [PATCH 20/73] fix: avoid logging unnecessary errors in async cleanup functions Cleanup functions that access ETS tables may fail with `badarg` error during EMQX shutdown. They are called asynchronously by `emqx_pool` workers and accessed ETS tables may be already destroyed as their owners are shut down. This fix catches ETS `badarg` errors before they can be caught and logged by `emqx_pool`. Fixes: EMQX-9992 --- apps/emqx/src/emqx_broker_helper.erl | 22 +++++++++++++--------- apps/emqx/src/emqx_cm.erl | 6 +++++- apps/emqx_gateway/src/emqx_gateway_cm.erl | 6 +++++- changes/ce/fix-11065.en.md | 1 + 4 files changed, 24 insertions(+), 11 deletions(-) create mode 100644 changes/ce/fix-11065.en.md diff --git a/apps/emqx/src/emqx_broker_helper.erl b/apps/emqx/src/emqx_broker_helper.erl index 06f249678..ea615c2f7 100644 --- a/apps/emqx/src/emqx_broker_helper.erl +++ b/apps/emqx/src/emqx_broker_helper.erl @@ -153,13 +153,17 @@ code_change(_OldVsn, State, _Extra) -> %%-------------------------------------------------------------------- clean_down(SubPid) -> - case ets:lookup(?SUBMON, SubPid) of - [{_, SubId}] -> - true = ets:delete(?SUBMON, SubPid), - true = - (SubId =:= undefined) orelse - ets:delete_object(?SUBID, {SubId, SubPid}), - emqx_broker:subscriber_down(SubPid); - [] -> - ok + try + case ets:lookup(?SUBMON, SubPid) of + [{_, SubId}] -> + true = ets:delete(?SUBMON, SubPid), + true = + (SubId =:= undefined) orelse + ets:delete_object(?SUBID, {SubId, SubPid}), + emqx_broker:subscriber_down(SubPid); + [] -> + ok + end + catch + error:badarg -> ok end. 
diff --git a/apps/emqx/src/emqx_cm.erl b/apps/emqx/src/emqx_cm.erl index c193cea44..40dece5a9 100644 --- a/apps/emqx/src/emqx_cm.erl +++ b/apps/emqx/src/emqx_cm.erl @@ -734,7 +734,11 @@ code_change(_OldVsn, State, _Extra) -> %%-------------------------------------------------------------------- clean_down({ChanPid, ClientId}) -> - do_unregister_channel({ClientId, ChanPid}), + try + do_unregister_channel({ClientId, ChanPid}) + catch + error:badarg -> ok + end, ok = ?tp(debug, emqx_cm_clean_down, #{client_id => ClientId}). stats_fun() -> diff --git a/apps/emqx_gateway/src/emqx_gateway_cm.erl b/apps/emqx_gateway/src/emqx_gateway_cm.erl index 4c07d3938..e52c81856 100644 --- a/apps/emqx_gateway/src/emqx_gateway_cm.erl +++ b/apps/emqx_gateway/src/emqx_gateway_cm.erl @@ -823,7 +823,11 @@ code_change(_OldVsn, State, _Extra) -> do_unregister_channel_task(Items, GwName, CmTabs) -> lists:foreach( fun({ChanPid, ClientId}) -> - do_unregister_channel(GwName, {ClientId, ChanPid}, CmTabs) + try + do_unregister_channel(GwName, {ClientId, ChanPid}, CmTabs) + catch + error:badarg -> ok + end end, Items ). diff --git a/changes/ce/fix-11065.en.md b/changes/ce/fix-11065.en.md new file mode 100644 index 000000000..e5742bfe0 --- /dev/null +++ b/changes/ce/fix-11065.en.md @@ -0,0 +1 @@ +Avoid logging irrelevant error messages during EMQX shutdown. From ab518a1386613d13349fa2902d166a0fee048ecf Mon Sep 17 00:00:00 2001 From: Kinplemelon Date: Mon, 17 Jul 2023 16:25:07 +0800 Subject: [PATCH 21/73] chore: upgrade dashboard to v1.3.2 for ce --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index bb2694d5d..9b9bcda15 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ endif # Dashboard version # from https://github.com/emqx/emqx-dashboard5 -export EMQX_DASHBOARD_VERSION ?= v1.3.1 +export EMQX_DASHBOARD_VERSION ?= v1.3.2 export EMQX_EE_DASHBOARD_VERSION ?= e1.1.1-beta.3 # `:=` should be used here, otherwise the `$(shell ...)` will be executed every time when the variable is used From 05c3e023a92d6d596cc8a09b396f69a4989d90d4 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 17 Jul 2023 11:24:21 -0300 Subject: [PATCH 22/73] chore(gcp_pubsub_consumer): unhide GCP PubSub Consumer bridge for e5.2.0 Fixes https://emqx.atlassian.net/browse/EMQX-10506 --- .../src/schema/emqx_bridge_enterprise.erl | 4 +--- .../emqx_bridge_gcp_pubsub_consumer_SUITE.erl | 23 +++++++++---------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl index e76d1af37..c770e4702 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl @@ -20,8 +20,7 @@ api_schemas(Method) -> %% We need to map the `type' field of a request (binary) to a %% bridge schema module. api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub">>, Method ++ "_producer"), - %% TODO: un-hide for e5.2.0... - %% api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub_consumer">>, Method ++ "_consumer"), + api_ref(emqx_bridge_gcp_pubsub, <<"gcp_pubsub_consumer">>, Method ++ "_consumer"), api_ref(emqx_bridge_kafka, <<"kafka_consumer">>, Method ++ "_consumer"), %% TODO: rename this to `kafka_producer' after alias support is added %% to hocon; keeping this as just `kafka' for backwards compatibility. 
@@ -263,7 +262,6 @@ gcp_pubsub_structs() -> hoconsc:map(name, ref(emqx_bridge_gcp_pubsub, "config_consumer")), #{ desc => <<"EMQX Enterprise Config">>, - importance => ?IMPORTANCE_HIDDEN, required => false } )} diff --git a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl index e2ea48e1c..9cfe88b7e 100644 --- a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_consumer_SUITE.erl @@ -902,16 +902,15 @@ t_consume_ok(Config) -> ?assertEqual(3, emqx_resource_metrics:received_get(ResourceId)) ), - %% FIXME: uncomment after API spec is un-hidden... - %% %% Check that the bridge probe API doesn't leak atoms. - %% ProbeRes0 = probe_bridge_api(Config), - %% ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), - %% AtomsBefore = erlang:system_info(atom_count), - %% %% Probe again; shouldn't have created more atoms. - %% ProbeRes1 = probe_bridge_api(Config), - %% ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), - %% AtomsAfter = erlang:system_info(atom_count), - %% ?assertEqual(AtomsBefore, AtomsAfter), + %% Check that the bridge probe API doesn't leak atoms. + ProbeRes0 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. + ProbeRes1 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), assert_non_received_metrics(BridgeName), ?block_until( @@ -1027,8 +1026,8 @@ t_on_get_status(Config) -> ?assertMatch({ok, connecting}, emqx_resource_manager:health_check(ResourceId)), ok. -t_create_via_http_api(_Config) -> - ct:comment("FIXME: implement after API specs are un-hidden in e5.2.0..."), +t_create_update_via_http_api(Config) -> + emqx_bridge_testlib:t_create_via_http(Config), ok. 
t_multiple_topic_mappings(Config) -> From 484519dcf9fbd607305f19086641072d114c1d52 Mon Sep 17 00:00:00 2001 From: Paulo Zulato Date: Mon, 3 Jul 2023 16:17:42 -0300 Subject: [PATCH 23/73] feat(kinesis): implement Amazon Kinesis Producer bridge Fixes https://emqx.atlassian.net/browse/EMQX-10474 Fixes https://emqx.atlassian.net/browse/EMQX-10475 --- .ci/docker-compose-file/.env | 1 + .../docker-compose-kinesis.yaml | 12 + .../docker-compose-toxiproxy.yaml | 2 + .ci/docker-compose-file/toxiproxy.json | 6 + apps/emqx_bridge/src/emqx_bridge.erl | 3 +- apps/emqx_bridge/src/emqx_bridge_resource.erl | 2 + .../src/schema/emqx_bridge_enterprise.erl | 24 +- apps/emqx_bridge_dynamo/rebar.config | 2 +- apps/emqx_bridge_kinesis/BSL.txt | 94 ++ apps/emqx_bridge_kinesis/README.md | 22 + apps/emqx_bridge_kinesis/docker-ct | 2 + apps/emqx_bridge_kinesis/rebar.config | 11 + .../src/emqx_bridge_kinesis.app.src | 13 + .../src/emqx_bridge_kinesis.erl | 167 ++++ .../emqx_bridge_kinesis_connector_client.erl | 178 ++++ .../src/emqx_bridge_kinesis_impl_producer.erl | 247 ++++++ ...mqx_bridge_kinesis_impl_producer_SUITE.erl | 817 ++++++++++++++++++ apps/emqx_s3/rebar.config | 2 +- changes/ee/feat-11261.en.md | 1 + mix.exs | 6 +- rebar.config.erl | 4 +- rel/i18n/emqx_bridge_kinesis.hocon | 85 ++ scripts/ct/run.sh | 3 + 23 files changed, 1694 insertions(+), 10 deletions(-) create mode 100644 .ci/docker-compose-file/docker-compose-kinesis.yaml create mode 100644 apps/emqx_bridge_kinesis/BSL.txt create mode 100644 apps/emqx_bridge_kinesis/README.md create mode 100644 apps/emqx_bridge_kinesis/docker-ct create mode 100644 apps/emqx_bridge_kinesis/rebar.config create mode 100644 apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src create mode 100644 apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl create mode 100644 apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl create mode 100644 apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl create mode 100644 apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl create mode 100644 changes/ee/feat-11261.en.md create mode 100644 rel/i18n/emqx_bridge_kinesis.hocon diff --git a/.ci/docker-compose-file/.env b/.ci/docker-compose-file/.env index 12bc988bf..e99a6d13f 100644 --- a/.ci/docker-compose-file/.env +++ b/.ci/docker-compose-file/.env @@ -9,6 +9,7 @@ DYNAMO_TAG=1.21.0 CASSANDRA_TAG=3.11.6 MINIO_TAG=RELEASE.2023-03-20T20-16-18Z OPENTS_TAG=9aa7f88 +KINESIS_TAG=2.1 MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server SQLSERVER_TAG=2019-CU19-ubuntu-20.04 diff --git a/.ci/docker-compose-file/docker-compose-kinesis.yaml b/.ci/docker-compose-file/docker-compose-kinesis.yaml new file mode 100644 index 000000000..d05b7c6c7 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-kinesis.yaml @@ -0,0 +1,12 @@ +version: '3.9' + +services: + kinesis: + container_name: kinesis + image: localstack/localstack:2.1 + environment: + - KINESIS_ERROR_PROBABILITY=0.0 + - KINESIS_LATENCY=0 + restart: always + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml index c0c88aef0..74d2583c9 100644 --- a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml +++ b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml @@ -49,6 +49,8 @@ services: - 38080:38080 # HStreamDB - 15670:5670 + # Kinesis + - 4566:4566 command: - "-host=0.0.0.0" - "-config=/config/toxiproxy.json" diff --git a/.ci/docker-compose-file/toxiproxy.json 
b/.ci/docker-compose-file/toxiproxy.json index d5576108f..c9590354b 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -161,5 +161,11 @@ "listen": "0.0.0.0:6570", "upstream": "hstreamdb:6570", "enabled": true + }, + { + "name": "kinesis", + "listen": "0.0.0.0:4566", + "upstream": "kinesis:4566", + "enabled": true } ] diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index c1692b9af..d5fc42ade 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -88,7 +88,8 @@ T == sqlserver; T == pulsar_producer; T == oracle; - T == iotdb + T == iotdb; + T == kinesis_producer ). -define(ROOT_KEY, bridges). diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index 539753b3b..62f0d7d89 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -374,6 +374,8 @@ parse_confs(<<"kafka">> = _Type, Name, Conf) -> Conf#{bridge_name => Name}; parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) -> Conf#{bridge_name => Name}; +parse_confs(<<"kinesis_producer">> = _Type, Name, Conf) -> + Conf#{bridge_name => Name}; parse_confs(_Type, _Name, Conf) -> Conf. diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl index e76d1af37..02a03a6d6 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl @@ -49,7 +49,8 @@ api_schemas(Method) -> api_ref(emqx_bridge_pulsar, <<"pulsar_producer">>, Method ++ "_producer"), api_ref(emqx_bridge_oracle, <<"oracle">>, Method), api_ref(emqx_bridge_iotdb, <<"iotdb">>, Method), - api_ref(emqx_bridge_rabbitmq, <<"rabbitmq">>, Method) + api_ref(emqx_bridge_rabbitmq, <<"rabbitmq">>, Method), + api_ref(emqx_bridge_kinesis, <<"kinesis_producer">>, Method ++ "_producer") ]. schema_modules() -> @@ -74,7 +75,8 @@ schema_modules() -> emqx_bridge_pulsar, emqx_bridge_oracle, emqx_bridge_iotdb, - emqx_bridge_rabbitmq + emqx_bridge_rabbitmq, + emqx_bridge_kinesis ]. examples(Method) -> @@ -119,7 +121,8 @@ resource_type(opents) -> emqx_bridge_opents_connector; resource_type(pulsar_producer) -> emqx_bridge_pulsar_impl_producer; resource_type(oracle) -> emqx_oracle; resource_type(iotdb) -> emqx_bridge_iotdb_impl; -resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector. +resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector; +resource_type(kinesis_producer) -> emqx_bridge_kinesis_impl_producer. fields(bridges) -> [ @@ -199,7 +202,8 @@ fields(bridges) -> ] ++ kafka_structs() ++ pulsar_structs() ++ gcp_pubsub_structs() ++ mongodb_structs() ++ influxdb_structs() ++ redis_structs() ++ - pgsql_structs() ++ clickhouse_structs() ++ sqlserver_structs() ++ rabbitmq_structs(). + pgsql_structs() ++ clickhouse_structs() ++ sqlserver_structs() ++ rabbitmq_structs() ++ + kinesis_structs(). mongodb_structs() -> [ @@ -365,6 +369,18 @@ rabbitmq_structs() -> )} ]. +kinesis_structs() -> + [ + {kinesis_producer, + mk( + hoconsc:map(name, ref(emqx_bridge_kinesis, "config_producer")), + #{ + desc => <<"Amazon Kinesis Producer Bridge Config">>, + required => false + } + )} + ]. + api_ref(Module, Type, Method) -> {Type, ref(Module, Method)}. 
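With the mappings above in place, a `kinesis_producer` entry under the `bridges` config root resolves to the `emqx_bridge_kinesis` schema and the `emqx_bridge_kinesis_impl_producer` resource module. A rough sketch of how such a raw config is validated, mirroring the parse_and_check/2 helper in the test suite added later in this patch; the bridge name, credentials and stream are made-up values, and only a few required fields are shown.

    check_kinesis_config() ->
        Hocon = iolist_to_binary([
            "bridges.kinesis_producer.my_bridge {\n",
            "  enable = true\n",
            "  aws_access_key_id = \"id\"\n",
            "  aws_secret_access_key = \"secret\"\n",
            "  stream_name = \"my_stream\"\n",
            "  partition_key = \"key\"\n",
            "}\n"
        ]),
        {ok, RawConf} = hocon:binary(Hocon, #{format => map}),
        %% Same call the suite's parse_and_check/2 helper makes.
        hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{
            required => false, atom_key => false
        }).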
diff --git a/apps/emqx_bridge_dynamo/rebar.config b/apps/emqx_bridge_dynamo/rebar.config index 672e8efc2..e80fb0f80 100644 --- a/apps/emqx_bridge_dynamo/rebar.config +++ b/apps/emqx_bridge_dynamo/rebar.config @@ -1,6 +1,6 @@ %% -*- mode: erlang; -*- {erl_opts, [debug_info]}. -{deps, [ {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-1"}}} +{deps, [ {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-2"}}} , {emqx_connector, {path, "../../apps/emqx_connector"}} , {emqx_resource, {path, "../../apps/emqx_resource"}} , {emqx_bridge, {path, "../../apps/emqx_bridge"}} diff --git a/apps/emqx_bridge_kinesis/BSL.txt b/apps/emqx_bridge_kinesis/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_kinesis/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. 
+ +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_kinesis/README.md b/apps/emqx_bridge_kinesis/README.md new file mode 100644 index 000000000..097c27a92 --- /dev/null +++ b/apps/emqx_bridge_kinesis/README.md @@ -0,0 +1,22 @@ +# Amazon Kinesis Data Integration Bridge + +This application houses the Amazon Kinesis Producer data +integration bridge for EMQX Enterprise Edition. It provides the means to +connect to Amazon Kinesis Data Streams and publish messages to it. + +# Documentation links + +For more information about Amazon Kinesis Data Streams, please see its +[official site](https://aws.amazon.com/kinesis/data-streams/). + +# Configurations + +Please see [Ingest Data into Kinesis](https://docs.emqx.com/en/enterprise/v5.1/data-integration/data-bridge-kinesis.html) for more detailed info. + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_kinesis/docker-ct b/apps/emqx_bridge_kinesis/docker-ct new file mode 100644 index 000000000..4422ee81e --- /dev/null +++ b/apps/emqx_bridge_kinesis/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +kinesis diff --git a/apps/emqx_bridge_kinesis/rebar.config b/apps/emqx_bridge_kinesis/rebar.config new file mode 100644 index 000000000..e4b57846e --- /dev/null +++ b/apps/emqx_bridge_kinesis/rebar.config @@ -0,0 +1,11 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-2"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_kinesis]} +]}. 
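The bridge is ultimately a thin layer over the erlcloud dependency declared above. A minimal sketch of the underlying calls, using the same erlcloud functions the connector client below is built on; the endpoint, credentials and stream name are placeholder values pointing at a local Kinesis-compatible endpoint such as the localstack container from the CI setup.

    put_one_record() ->
        {ok, _} = application:ensure_all_started(erlcloud),
        %% Build an #aws_config{} for the endpoint and install it as the
        %% default config for the calling process, as the connector client
        %% below does in its init/1.
        New = fun(KeyId, Secret, Host, Port, Scheme) ->
            erlcloud_kinesis:new(KeyId, Secret, Host, Port, Scheme ++ "://")
        end,
        erlcloud_config:configure("access_key", "secret", "localhost", 4566, "http", New),
        %% Publish a single {StreamName, PartitionKey, Data} record.
        erlcloud_kinesis:put_record(<<"my_stream">>, <<"my_key">>, <<"hello">>).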
diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src new file mode 100644 index 000000000..36f6c8b0b --- /dev/null +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src @@ -0,0 +1,13 @@ +{application, emqx_bridge_kinesis, [ + {description, "EMQX Enterprise Amazon Kinesis Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + erlcloud + ]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl new file mode 100644 index 000000000..cb3cd3788 --- /dev/null +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl @@ -0,0 +1,167 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_kinesis). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +%% hocon_schema API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-export([ + conn_bridge_examples/1 +]). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> + "bridge_kinesis". + +roots() -> + []. + +fields("config_producer") -> + emqx_bridge_schema:common_bridge_fields() ++ + emqx_resource_schema:fields("resource_opts") ++ + fields(connector_config) ++ fields(producer); +fields(connector_config) -> + [ + {aws_access_key_id, + mk( + binary(), + #{ + required => true, + desc => ?DESC("aws_access_key_id") + } + )}, + {aws_secret_access_key, + mk( + binary(), + #{ + required => true, + desc => ?DESC("aws_secret_access_key"), + sensitive => true + } + )}, + {endpoint, + mk( + binary(), + #{ + default => <<"https://kinesis.us-east-1.amazonaws.com">>, + desc => ?DESC("endpoint") + } + )}, + {max_retries, + mk( + non_neg_integer(), + #{ + required => false, + default => 2, + desc => ?DESC("max_retries") + } + )}, + {pool_size, + sc( + pos_integer(), + #{ + default => 8, + desc => ?DESC("pool_size") + } + )} + ]; +fields(producer) -> + [ + {payload_template, + sc( + binary(), + #{ + default => <<>>, + desc => ?DESC("payload_template") + } + )}, + {local_topic, + sc( + binary(), + #{ + desc => ?DESC("local_topic") + } + )}, + {stream_name, + sc( + binary(), + #{ + required => true, + desc => ?DESC("stream_name") + } + )}, + {partition_key, + sc( + binary(), + #{ + required => true, + desc => ?DESC("partition_key") + } + )} + ]; +fields("get_producer") -> + emqx_bridge_schema:status_fields() ++ fields("post_producer"); +fields("post_producer") -> + [type_field_producer(), name_field() | fields("config_producer")]; +fields("put_producer") -> + fields("config_producer"). + +desc("config_producer") -> + ?DESC("desc_config"); +desc(_) -> + undefined. + +conn_bridge_examples(Method) -> + [ + #{ + <<"kinesis_producer">> => #{ + summary => <<"Amazon Kinesis Producer Bridge">>, + value => values(producer, Method) + } + } + ]. 
+ +values(producer, _Method) -> + #{ + aws_access_key_id => <<"aws_access_key_id">>, + aws_secret_access_key => <<"******">>, + endpoint => <<"https://kinesis.us-east-1.amazonaws.com">>, + max_retries => 3, + stream_name => <<"stream_name">>, + partition_key => <<"key">>, + resource_opts => #{ + worker_pool_size => 1, + health_check_interval => 15000, + query_mode => async, + inflight_window => 100, + max_buffer_bytes => 100 * 1024 * 1024 + } + }. + +%%------------------------------------------------------------------------------------------------- +%% Helper fns +%%------------------------------------------------------------------------------------------------- + +sc(Type, Meta) -> hoconsc:mk(Type, Meta). + +mk(Type, Meta) -> hoconsc:mk(Type, Meta). + +enum(OfSymbols) -> hoconsc:enum(OfSymbols). + +type_field_producer() -> + {type, mk(enum([kinesis_producer]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl new file mode 100644 index 000000000..bb1000e5f --- /dev/null +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl @@ -0,0 +1,178 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_kinesis_connector_client). + +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("erlcloud/include/erlcloud_aws.hrl"). + +-behaviour(gen_server). + +-type state() :: #{ + instance_id := resource_id(), + partition_key := binary(), + stream_name := binary() +}. +-type record() :: {Data :: binary(), PartitionKey :: binary()}. + +-define(DEFAULT_PORT, 443). + +%% API +-export([ + start_link/1, + connection_status/1, + query/2 +]). + +%% gen_server callbacks +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). + +-ifdef(TEST). +-export([execute/2]). +-endif. + +%% The default timeout for Kinesis API calls is 10 seconds, +%% but this value for `gen_server:call` is 5s, +%% so we should adjust timeout for `gen_server:call` +-define(HEALTH_CHECK_TIMEOUT, 15000). + +%%%=================================================================== +%%% API +%%%=================================================================== +connection_status(Pid) -> + try + gen_server:call(Pid, connection_status, ?HEALTH_CHECK_TIMEOUT) + catch + _:_ -> + {error, timeout} + end. + +query(Pid, Records) -> + gen_server:call(Pid, {query, Records}, infinity). + +%%-------------------------------------------------------------------- +%% @doc +%% Starts Bridge which communicates to Amazon Kinesis Data Streams +%% @end +%%-------------------------------------------------------------------- +start_link(Options) -> + gen_server:start_link(?MODULE, Options, []). + +%%%=================================================================== +%%% gen_server callbacks +%%%=================================================================== + +%% Initialize kinesis connector +-spec init(emqx_bridge_kinesis_impl_producer:config()) -> {ok, state()}. 
+init(#{ + aws_access_key_id := AwsAccessKey, + aws_secret_access_key := AwsSecretAccessKey, + endpoint := Endpoint, + partition_key := PartitionKey, + stream_name := StreamName, + max_retries := MaxRetries, + instance_id := InstanceId +}) -> + process_flag(trap_exit, true), + + #{scheme := Scheme, hostname := Host, port := Port} = + emqx_schema:parse_server( + Endpoint, + #{ + default_port => ?DEFAULT_PORT, + supported_schemes => ["http", "https"] + } + ), + State = #{ + instance_id => InstanceId, + partition_key => PartitionKey, + stream_name => StreamName + }, + New = + fun(AccessKeyID, SecretAccessKey, HostAddr, HostPort, ConnectionScheme) -> + Config0 = erlcloud_kinesis:new( + AccessKeyID, + SecretAccessKey, + HostAddr, + HostPort, + ConnectionScheme ++ "://" + ), + Config0#aws_config{retry_num = MaxRetries} + end, + erlcloud_config:configure( + to_str(AwsAccessKey), to_str(AwsSecretAccessKey), Host, Port, Scheme, New + ), + {ok, State}. + +handle_call(connection_status, _From, #{stream_name := StreamName} = State) -> + Status = + case erlcloud_kinesis:describe_stream(StreamName) of + {ok, _} -> + {ok, connected}; + {error, {<<"ResourceNotFoundException">>, _}} -> + {error, unhealthy_target}; + Error -> + {error, Error} + end, + {reply, Status, State}; +handle_call({query, Records}, _From, #{stream_name := StreamName} = State) -> + Result = do_query(StreamName, Records), + {reply, Result, State}; +handle_call(_Request, _From, State) -> + {reply, {error, unknown_call}, State}. + +handle_cast(_Request, State) -> + {noreply, State}. + +handle_info(_Info, State) -> + {noreply, State}. + +terminate(Reason, #{instance_id := InstanceId} = _State) -> + ?tp(kinesis_stop, #{instance_id => InstanceId, reason => Reason}), + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%%=================================================================== +%%% Internal functions +%%%=================================================================== + +-spec do_query(binary(), [record()]) -> + {ok, jsx:json_term() | binary()} + | {error, {unrecoverable_error, term()}} + | {error, term()}. +do_query(StreamName, Records) -> + try + execute(put_record, {StreamName, Records}) + catch + _Type:Reason -> + {error, {unrecoverable_error, {invalid_request, Reason}}} + end. + +-spec execute(put_record, {binary(), [record()]}) -> + {ok, jsx:json_term() | binary()} + | {error, term()}. +execute(put_record, {StreamName, [{Data, PartitionKey}] = Record}) -> + Result = erlcloud_kinesis:put_record(StreamName, PartitionKey, Data), + ?tp(kinesis_put_record, #{records => Record, result => Result}), + Result; +execute(put_record, {StreamName, Items}) when is_list(Items) -> + Result = erlcloud_kinesis:put_records(StreamName, Items), + ?tp(kinesis_put_record, #{records => Items, result => Result}), + Result. + +-spec to_str(list() | binary()) -> list(). +to_str(List) when is_list(List) -> + List; +to_str(Bin) when is_binary(Bin) -> + erlang:binary_to_list(Bin). diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl new file mode 100644 index 000000000..7948581b5 --- /dev/null +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl @@ -0,0 +1,247 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- + +-module(emqx_bridge_kinesis_impl_producer). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-define(HEALTH_CHECK_TIMEOUT, 15000). +-define(TOPIC_MESSAGE, + "Kinesis stream is invalid. Please check if the stream exist in Kinesis account." +). + +-type config() :: #{ + aws_access_key_id := binary(), + aws_secret_access_key := binary(), + endpoint := binary(), + stream_name := binary(), + partition_key := binary(), + payload_template := binary(), + max_retries := non_neg_integer(), + pool_size := non_neg_integer(), + instance_id => resource_id(), + any() => term() +}. +-type templates() :: #{ + partition_key := list(), + send_message := list() +}. +-type state() :: #{ + pool_name := resource_id(), + templates := templates() +}. +-export_type([config/0]). + +%% `emqx_resource' API +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). + +-export([ + connect/1 +]). + +%%------------------------------------------------------------------------------------------------- +%% `emqx_resource' API +%%------------------------------------------------------------------------------------------------- + +callback_mode() -> always_sync. + +-spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}. +on_start( + InstanceId, + #{ + pool_size := PoolSize + } = Config0 +) -> + ?SLOG(info, #{ + msg => "starting_kinesis_bridge", + connector => InstanceId, + config => redact(Config0) + }), + Config = Config0#{instance_id => InstanceId}, + Options = [ + {config, Config}, + {pool_size, PoolSize} + ], + Templates = parse_template(Config), + State = #{ + pool_name => InstanceId, + templates => Templates + }, + + case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of + ok -> + ?tp(emqx_bridge_kinesis_impl_producer_start_ok, #{config => Config}), + {ok, State}; + Error -> + ?tp(emqx_bridge_kinesis_impl_producer_start_failed, #{config => Config}), + Error + end. + +-spec on_stop(resource_id(), state()) -> ok | {error, term()}. +on_stop(InstanceId, _State) -> + emqx_resource_pool:stop(InstanceId). + +-spec on_get_status(resource_id(), state()) -> + connected | disconnected | {disconnected, state(), {unhealthy_target, string()}}. +on_get_status(_InstanceId, #{pool_name := Pool} = State) -> + case + emqx_resource_pool:health_check_workers( + Pool, + {emqx_bridge_kinesis_connector_client, connection_status, []}, + ?HEALTH_CHECK_TIMEOUT, + #{return_values => true} + ) + of + {ok, Values} -> + AllOk = lists:all(fun(S) -> S =:= {ok, connected} end, Values), + case AllOk of + true -> + connected; + false -> + Unhealthy = lists:any(fun(S) -> S =:= {error, unhealthy_target} end, Values), + case Unhealthy of + true -> {disconnected, State, {unhealthy_target, ?TOPIC_MESSAGE}}; + false -> disconnected + end + end; + {error, _} -> + disconnected + end. + +-spec on_query( + resource_id(), + {send_message, map()}, + state() +) -> + {ok, map()} + | {error, {recoverable_error, term()}} + | {error, term()}. +on_query(ResourceId, {send_message, Message}, State) -> + Requests = [{send_message, Message}], + ?tp(emqx_bridge_kinesis_impl_producer_sync_query, #{message => Message}), + do_send_requests_sync(ResourceId, Requests, State). 
+ +-spec on_batch_query( + resource_id(), + [{send_message, map()}], + state() +) -> + {ok, map()} + | {error, {recoverable_error, term()}} + | {error, term()}. +%% we only support batch insert +on_batch_query(ResourceId, [{send_message, _} | _] = Requests, State) -> + ?tp(emqx_bridge_kinesis_impl_producer_sync_batch_query, #{requests => Requests}), + do_send_requests_sync(ResourceId, Requests, State). + +connect(Opts) -> + Options = proplists:get_value(config, Opts), + emqx_bridge_kinesis_connector_client:start_link(Options). + +%%------------------------------------------------------------------------------------------------- +%% Helper fns +%%------------------------------------------------------------------------------------------------- + +-spec do_send_requests_sync( + resource_id(), + [{send_message, map()}], + state() +) -> + {ok, jsx:json_term() | binary()} + | {error, {recoverable_error, term()}} + | {error, {unrecoverable_error, {invalid_request, term()}}} + | {error, {unrecoverable_error, {unhealthy_target, string()}}} + | {error, {unrecoverable_error, term()}} + | {error, term()}. +do_send_requests_sync( + InstanceId, + Requests, + #{pool_name := PoolName, templates := Templates} +) -> + Records = render_records(Requests, Templates), + Result = ecpool:pick_and_do( + PoolName, + {emqx_bridge_kinesis_connector_client, query, [Records]}, + no_handover + ), + handle_result(Result, Requests, InstanceId). + +handle_result({ok, _} = Result, _Requests, _InstanceId) -> + Result; +handle_result({error, {<<"ResourceNotFoundException">>, _} = Reason}, Requests, InstanceId) -> + ?SLOG(error, #{ + msg => "kinesis_error_response", + request => Requests, + connector => InstanceId, + reason => Reason + }), + {error, {unrecoverable_error, {unhealthy_target, ?TOPIC_MESSAGE}}}; +handle_result( + {error, {<<"ProvisionedThroughputExceededException">>, _} = Reason}, Requests, InstanceId +) -> + ?SLOG(error, #{ + msg => "kinesis_error_response", + request => Requests, + connector => InstanceId, + reason => Reason + }), + {error, {recoverable_error, Reason}}; +handle_result({error, {<<"InvalidArgumentException">>, _} = Reason}, Requests, InstanceId) -> + ?SLOG(error, #{ + msg => "kinesis_error_response", + request => Requests, + connector => InstanceId, + reason => Reason + }), + {error, {unrecoverable_error, Reason}}; +handle_result({error, {econnrefused = Reason, _}}, Requests, InstanceId) -> + ?SLOG(error, #{ + msg => "kinesis_error_response", + request => Requests, + connector => InstanceId, + reason => Reason + }), + {error, {recoverable_error, Reason}}; +handle_result({error, Reason} = Error, Requests, InstanceId) -> + ?SLOG(error, #{ + msg => "kinesis_error_response", + request => Requests, + connector => InstanceId, + reason => Reason + }), + Error. + +parse_template(Config) -> + #{payload_template := PayloadTemplate, partition_key := PartitionKeyTemplate} = Config, + Templates = #{send_message => PayloadTemplate, partition_key => PartitionKeyTemplate}, + maps:map(fun(_K, V) -> emqx_placeholder:preproc_tmpl(V) end, Templates). + +render_records(Items, Templates) -> + PartitionKeyTemplate = maps:get(partition_key, Templates), + MsgTemplate = maps:get(send_message, Templates), + render_messages(Items, {MsgTemplate, PartitionKeyTemplate}, []). 
+ +render_messages([], _Templates, RenderedMsgs) -> + RenderedMsgs; +render_messages( + [{send_message, Msg} | Others], + {MsgTemplate, PartitionKeyTemplate} = Templates, + RenderedMsgs +) -> + Data = emqx_placeholder:proc_tmpl(MsgTemplate, Msg), + PartitionKey = emqx_placeholder:proc_tmpl(PartitionKeyTemplate, Msg), + RenderedMsg = {Data, PartitionKey}, + render_messages(Others, Templates, [RenderedMsg | RenderedMsgs]). + +redact(Config) -> + emqx_utils:redact(Config, fun(Any) -> Any =:= aws_secret_access_key end). diff --git a/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl b/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl new file mode 100644 index 000000000..114f324a9 --- /dev/null +++ b/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl @@ -0,0 +1,817 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_kinesis_impl_producer_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-define(PRODUCER, emqx_bridge_kinesis_impl_producer). +-define(BRIDGE_TYPE, kinesis_producer). +-define(BRIDGE_TYPE_BIN, <<"kinesis_producer">>). +-define(KINESIS_PORT, 4566). +-define(TOPIC, <<"t/topic">>). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, with_batch}, + {group, without_batch} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + [ + {with_batch, TCs}, + {without_batch, TCs} + ]. + +init_per_suite(Config) -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy.emqx.net"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + ProxyName = "kinesis", + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {kinesis_port, ?KINESIS_PORT}, + {proxy_name, ProxyName} + | Config + ]. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]), + _ = application:stop(emqx_connector), + ok. + +init_per_group(with_batch, Config) -> + [{batch_size, 100} | Config]; +init_per_group(without_batch, Config) -> + [{batch_size, 1} | Config]; +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(TestCase, Config0) -> + ok = snabbkaffe:start_trace(), + ProxyHost = ?config(proxy_host, Config0), + ProxyPort = ?config(proxy_port, Config0), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + TimeTrap = + case TestCase of + t_wrong_server -> 60; + _ -> 30 + end, + ct:timetrap({seconds, TimeTrap}), + delete_all_bridges(), + Tid = install_telemetry_handler(TestCase), + put(telemetry_table, Tid), + Config = generate_config(Config0), + create_stream(Config), + [{telemetry_table, Tid} | Config]. 
+ +end_per_testcase(_TestCase, Config) -> + ok = snabbkaffe:stop(), + delete_all_bridges(), + delete_stream(Config), + emqx_common_test_helpers:call_janitor(), + ok. + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +generate_config(Config0) -> + #{ + name := Name, + config_string := ConfigString, + kinesis_config := KinesisConfig + } = kinesis_config(Config0), + Endpoint = map_get(<<"endpoint">>, KinesisConfig), + #{scheme := Scheme, hostname := Host, port := Port} = + emqx_schema:parse_server( + Endpoint, + #{ + default_port => 443, + supported_schemes => ["http", "https"] + } + ), + ErlcloudConfig = erlcloud_kinesis:new("access_key", "secret", Host, Port, Scheme ++ "://"), + ResourceId = emqx_bridge_resource:resource_id(?BRIDGE_TYPE_BIN, Name), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, Name), + [ + {kinesis_name, Name}, + {connection_scheme, Scheme}, + {kinesis_config, KinesisConfig}, + {kinesis_config_string, ConfigString}, + {resource_id, ResourceId}, + {bridge_id, BridgeId}, + {erlcloud_config, ErlcloudConfig} + | Config0 + ]. + +kinesis_config(Config) -> + QueryMode = proplists:get_value(query_mode, Config, async), + Scheme = proplists:get_value(connection_scheme, Config, "http"), + ProxyHost = proplists:get_value(proxy_host, Config), + KinesisPort = proplists:get_value(kinesis_port, Config), + BatchSize = proplists:get_value(batch_size, Config, 100), + BatchTime = proplists:get_value(batch_time, Config, <<"500ms">>), + PayloadTemplate = proplists:get_value(payload_template, Config, "${payload}"), + StreamName = proplists:get_value(stream_name, Config, <<"mystream">>), + PartitionKey = proplists:get_value(partition_key, Config, <<"key">>), + MaxRetries = proplists:get_value(max_retries, Config, 3), + GUID = emqx_guid:to_hexstr(emqx_guid:gen()), + Name = <<(atom_to_binary(?MODULE))/binary, (GUID)/binary>>, + ConfigString = + io_lib:format( + "bridges.kinesis_producer.~s {\n" + " enable = true\n" + " aws_access_key_id = \"aws_access_key_id\"\n" + " aws_secret_access_key = \"aws_secret_access_key\"\n" + " endpoint = \"~s://~s:~b\"\n" + " stream_name = \"~s\"\n" + " partition_key = \"~s\"\n" + " payload_template = \"~s\"\n" + " max_retries = ~b\n" + " pool_size = 1\n" + " resource_opts = {\n" + " health_check_interval = \"3s\"\n" + " request_ttl = 30s\n" + " resume_interval = 1s\n" + " metrics_flush_interval = \"700ms\"\n" + " worker_pool_size = 1\n" + " query_mode = ~s\n" + " batch_size = ~b\n" + " batch_time = \"~s\"\n" + " }\n" + "}\n", + [ + Name, + Scheme, + ProxyHost, + KinesisPort, + StreamName, + PartitionKey, + PayloadTemplate, + MaxRetries, + QueryMode, + BatchSize, + BatchTime + ] + ), + #{ + name => Name, + config_string => ConfigString, + kinesis_config => parse_and_check(ConfigString, Name) + }. + +parse_and_check(ConfigString, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = <<"kinesis_producer">>, + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +delete_all_bridges() -> + ct:pal("deleting all bridges"), + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). 
+ +delete_bridge(Config) -> + Type = ?BRIDGE_TYPE, + Name = ?config(kinesis_name, Config), + ct:pal("deleting bridge ~p", [{Type, Name}]), + emqx_bridge:remove(Type, Name). + +create_bridge_http(Config) -> + create_bridge_http(Config, _KinesisConfigOverrides = #{}). + +create_bridge_http(Config, KinesisConfigOverrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(kinesis_name, Config), + KinesisConfig0 = ?config(kinesis_config, Config), + KinesisConfig = emqx_utils_maps:deep_merge(KinesisConfig0, KinesisConfigOverrides), + Params = KinesisConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ProbePath = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + ProbeResult = emqx_mgmt_api_test_util:request_api(post, ProbePath, "", AuthHeader, Params), + ct:pal("creating bridge (via http): ~p", [Params]), + ct:pal("probe result: ~p", [ProbeResult]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res0} -> {ok, emqx_utils_json:decode(Res0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge creation result: ~p", [Res]), + ?assertEqual(element(1, ProbeResult), element(1, Res)), + Res. + +create_bridge(Config) -> + create_bridge(Config, _KinesisConfigOverrides = #{}). + +create_bridge(Config, KinesisConfigOverrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(kinesis_name, Config), + KinesisConfig0 = ?config(kinesis_config, Config), + KinesisConfig = emqx_utils_maps:deep_merge(KinesisConfig0, KinesisConfigOverrides), + ct:pal("creating bridge: ~p", [KinesisConfig]), + Res = emqx_bridge:create(TypeBin, Name, KinesisConfig), + ct:pal("bridge creation result: ~p", [Res]), + Res. + +create_rule_and_action_http(Config) -> + BridgeId = ?config(bridge_id, Config), + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"", ?TOPIC/binary, "\"">>, + actions => [BridgeId] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +create_stream(Config) -> + KinesisConfig = ?config(kinesis_config, Config), + ErlcloudConfig = ?config(erlcloud_config, Config), + StreamName = map_get(<<"stream_name">>, KinesisConfig), + {ok, _} = application:ensure_all_started(erlcloud), + delete_stream(StreamName, ErlcloudConfig), + {ok, _} = erlcloud_kinesis:create_stream(StreamName, 1, ErlcloudConfig), + ?retry( + _Sleep = 100, + _Attempts = 10, + begin + {ok, [{<<"StreamDescription">>, StreamInfo}]} = + erlcloud_kinesis:describe_stream(StreamName, ErlcloudConfig), + ?assertEqual( + <<"ACTIVE">>, + proplists:get_value(<<"StreamStatus">>, StreamInfo) + ) + end + ), + ok. + +delete_stream(Config) -> + KinesisConfig = ?config(kinesis_config, Config), + ErlcloudConfig = ?config(erlcloud_config, Config), + StreamName = map_get(<<"stream_name">>, KinesisConfig), + {ok, _} = application:ensure_all_started(erlcloud), + delete_stream(StreamName, ErlcloudConfig), + ok. + +delete_stream(StreamName, ErlcloudConfig) -> + case erlcloud_kinesis:delete_stream(StreamName, ErlcloudConfig) of + {ok, _} -> + ?retry( + _Sleep = 100, + _Attempts = 10, + ?assertMatch( + {error, {<<"ResourceNotFoundException">>, _}}, + erlcloud_kinesis:describe_stream(StreamName, ErlcloudConfig) + ) + ); + _ -> + ok + end, + ok. 
+ +wait_record(Config, ShardIt, Timeout, Attempts) -> + [Record] = wait_records(Config, ShardIt, 1, Timeout, Attempts), + Record. + +wait_records(Config, ShardIt, Count, Timeout, Attempts) -> + ErlcloudConfig = ?config(erlcloud_config, Config), + ?retry( + Timeout, + Attempts, + begin + {ok, Ret} = erlcloud_kinesis:get_records(ShardIt, ErlcloudConfig), + Records = proplists:get_value(<<"Records">>, Ret), + Count = length(Records), + Records + end + ). + +get_shard_iterator(Config) -> + get_shard_iterator(Config, 1). + +get_shard_iterator(Config, Index) -> + KinesisConfig = ?config(kinesis_config, Config), + ErlcloudConfig = ?config(erlcloud_config, Config), + StreamName = map_get(<<"stream_name">>, KinesisConfig), + {ok, [{<<"Shards">>, Shards}]} = erlcloud_kinesis:list_shards(StreamName, ErlcloudConfig), + Shard = lists:nth(Index, lists:sort(Shards)), + ShardId = proplists:get_value(<<"ShardId">>, Shard), + {ok, [{<<"ShardIterator">>, ShardIt}]} = + erlcloud_kinesis:get_shard_iterator(StreamName, ShardId, <<"LATEST">>, ErlcloudConfig), + ShardIt. + +install_telemetry_handler(TestCase) -> + Tid = ets:new(TestCase, [ordered_set, public]), + HandlerId = TestCase, + TestPid = self(), + _ = telemetry:attach_many( + HandlerId, + emqx_resource_metrics:events(), + fun(EventName, Measurements, Metadata, _Config) -> + Data = #{ + name => EventName, + measurements => Measurements, + metadata => Metadata + }, + ets:insert(Tid, {erlang:monotonic_time(), Data}), + TestPid ! {telemetry, Data}, + ok + end, + unused_config + ), + emqx_common_test_helpers:on_exit(fun() -> + telemetry:detach(HandlerId), + ets:delete(Tid) + end), + Tid. + +current_metrics(ResourceId) -> + Mapping = metrics_mapping(), + maps:from_list([ + {Metric, F(ResourceId)} + || {Metric, F} <- maps:to_list(Mapping) + ]). + +metrics_mapping() -> + #{ + dropped => fun emqx_resource_metrics:dropped_get/1, + dropped_expired => fun emqx_resource_metrics:dropped_expired_get/1, + dropped_other => fun emqx_resource_metrics:dropped_other_get/1, + dropped_queue_full => fun emqx_resource_metrics:dropped_queue_full_get/1, + dropped_resource_not_found => fun emqx_resource_metrics:dropped_resource_not_found_get/1, + dropped_resource_stopped => fun emqx_resource_metrics:dropped_resource_stopped_get/1, + late_reply => fun emqx_resource_metrics:late_reply_get/1, + failed => fun emqx_resource_metrics:failed_get/1, + inflight => fun emqx_resource_metrics:inflight_get/1, + matched => fun emqx_resource_metrics:matched_get/1, + queuing => fun emqx_resource_metrics:queuing_get/1, + retried => fun emqx_resource_metrics:retried_get/1, + retried_failed => fun emqx_resource_metrics:retried_failed_get/1, + retried_success => fun emqx_resource_metrics:retried_success_get/1, + success => fun emqx_resource_metrics:success_get/1 + }. + +assert_metrics(ExpectedMetrics, ResourceId) -> + Mapping = metrics_mapping(), + Metrics = + lists:foldl( + fun(Metric, Acc) -> + #{Metric := Fun} = Mapping, + Value = Fun(ResourceId), + Acc#{Metric => Value} + end, + #{}, + maps:keys(ExpectedMetrics) + ), + CurrentMetrics = current_metrics(ResourceId), + TelemetryTable = get(telemetry_table), + RecordedEvents = ets:tab2list(TelemetryTable), + ?assertEqual(ExpectedMetrics, Metrics, #{ + current_metrics => CurrentMetrics, recorded_events => RecordedEvents + }), + ok. 
+ +assert_empty_metrics(ResourceId) -> + Mapping = metrics_mapping(), + ExpectedMetrics = + lists:foldl( + fun(Metric, Acc) -> + Acc#{Metric => 0} + end, + #{}, + maps:keys(Mapping) + ), + assert_metrics(ExpectedMetrics, ResourceId). + +wait_telemetry_event(TelemetryTable, EventName, ResourceId) -> + wait_telemetry_event(TelemetryTable, EventName, ResourceId, #{timeout => 5_000, n_events => 1}). + +wait_telemetry_event( + TelemetryTable, + EventName, + ResourceId, + _Opts = #{ + timeout := Timeout, + n_events := NEvents + } +) -> + wait_n_events(TelemetryTable, ResourceId, NEvents, Timeout, EventName). + +wait_n_events(_TelemetryTable, _ResourceId, NEvents, _Timeout, _EventName) when NEvents =< 0 -> + ok; +wait_n_events(TelemetryTable, ResourceId, NEvents, Timeout, EventName) -> + receive + {telemetry, #{name := [_, _, EventName], measurements := #{counter_inc := Inc}} = Event} -> + ct:pal("telemetry event: ~p", [Event]), + wait_n_events(TelemetryTable, ResourceId, NEvents - Inc, Timeout, EventName) + after Timeout -> + RecordedEvents = ets:tab2list(TelemetryTable), + CurrentMetrics = current_metrics(ResourceId), + ct:pal("recorded events: ~p", [RecordedEvents]), + ct:pal("current metrics: ~p", [CurrentMetrics]), + error({timeout_waiting_for_telemetry, EventName}) + end. + +wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) -> + Events = receive_all_events(GaugeName, Timeout), + case length(Events) > 0 andalso lists:last(Events) of + #{measurements := #{gauge_set := ExpectedValue}} -> + ok; + #{measurements := #{gauge_set := Value}} -> + ct:pal("events: ~p", [Events]), + ct:fail( + "gauge ~p didn't reach expected value ~p; last value: ~p", + [GaugeName, ExpectedValue, Value] + ); + false -> + ct:pal("no ~p gauge events received!", [GaugeName]) + end. + +receive_all_events(EventName, Timeout) -> + receive_all_events(EventName, Timeout, _MaxEvents = 10, _Count = 0, _Acc = []). + +receive_all_events(_EventName, _Timeout, MaxEvents, Count, Acc) when Count >= MaxEvents -> + lists:reverse(Acc); +receive_all_events(EventName, Timeout, MaxEvents, Count, Acc) -> + receive + {telemetry, #{name := [_, _, EventName]} = Event} -> + receive_all_events(EventName, Timeout, MaxEvents, Count + 1, [Event | Acc]) + after Timeout -> + lists:reverse(Acc) + end. + +to_str(List) when is_list(List) -> + List; +to_str(Bin) when is_binary(Bin) -> + erlang:binary_to_list(Bin); +to_str(Int) when is_integer(Int) -> + erlang:integer_to_list(Int). + +to_bin(Str) when is_list(Str) -> + erlang:list_to_binary(Str). + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_create_via_http(Config) -> + ?assertMatch({ok, _}, create_bridge_http(Config)), + ok. + +t_start_failed_then_fix(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = ?config(resource_id, Config), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(1000), + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := emqx_bridge_kinesis_impl_producer_start_failed}, + 20_000 + ) + end), + ?retry( + _Sleep1 = 1_000, + _Attempts1 = 30, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok. 
+ +t_stop(Config) -> + Name = ?config(kinesis_name, Config), + {ok, _} = create_bridge(Config), + ?check_trace( + ?wait_async_action( + emqx_bridge_resource:stop(?BRIDGE_TYPE, Name), + #{?snk_kind := kinesis_stop}, + 5_000 + ), + fun(Trace) -> + ?assertMatch([_], ?of_kind(kinesis_stop, Trace)), + ok + end + ), + ok. + +t_get_status_ok(Config) -> + ResourceId = ?config(resource_id, Config), + {ok, _} = create_bridge(Config), + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)), + ok. + +t_create_unhealthy(Config) -> + delete_stream(Config), + ResourceId = ?config(resource_id, Config), + {ok, _} = create_bridge(Config), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)), + ?assertMatch( + {ok, _, #{error := {unhealthy_target, _}}}, + emqx_resource_manager:lookup_cached(ResourceId) + ), + ok. + +t_get_status_unhealthy(Config) -> + delete_stream(Config), + ResourceId = ?config(resource_id, Config), + {ok, _} = create_bridge(Config), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)), + ?assertMatch( + {ok, _, #{error := {unhealthy_target, _}}}, + emqx_resource_manager:lookup_cached(ResourceId) + ), + ok. + +t_publish_success(Config) -> + ResourceId = ?config(resource_id, Config), + TelemetryTable = ?config(telemetry_table, Config), + ?assertMatch({ok, _}, create_bridge(Config)), + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + assert_empty_metrics(ResourceId), + ShardIt = get_shard_iterator(Config), + Payload = <<"payload">>, + Message = emqx_message:make(?TOPIC, Payload), + emqx:publish(Message), + %% to avoid test flakiness + wait_telemetry_event(TelemetryTable, success, ResourceId), + wait_until_gauge_is(queuing, 0, 500), + wait_until_gauge_is(inflight, 0, 500), + assert_metrics( + #{ + dropped => 0, + failed => 0, + inflight => 0, + matched => 1, + queuing => 0, + retried => 0, + success => 1 + }, + ResourceId + ), + Record = wait_record(Config, ShardIt, 100, 10), + ?assertEqual(Payload, proplists:get_value(<<"Data">>, Record)), + ok. + +t_publish_success_with_template(Config) -> + ResourceId = ?config(resource_id, Config), + TelemetryTable = ?config(telemetry_table, Config), + Overrides = + #{ + <<"payload_template">> => <<"${payload.data}">>, + <<"partition_key">> => <<"${payload.key}">> + }, + ?assertMatch({ok, _}, create_bridge(Config, Overrides)), + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + assert_empty_metrics(ResourceId), + ShardIt = get_shard_iterator(Config), + Payload = <<"{\"key\":\"my_key\", \"data\":\"my_data\"}">>, + Message = emqx_message:make(?TOPIC, Payload), + emqx:publish(Message), + %% to avoid test flakiness + wait_telemetry_event(TelemetryTable, success, ResourceId), + wait_until_gauge_is(queuing, 0, 500), + wait_until_gauge_is(inflight, 0, 500), + assert_metrics( + #{ + dropped => 0, + failed => 0, + inflight => 0, + matched => 1, + queuing => 0, + retried => 0, + success => 1 + }, + ResourceId + ), + Record = wait_record(Config, ShardIt, 100, 10), + ?assertEqual(<<"my_data">>, proplists:get_value(<<"Data">>, Record)), + ok. 
+ +t_publish_multiple_msgs_success(Config) -> + ResourceId = ?config(resource_id, Config), + TelemetryTable = ?config(telemetry_table, Config), + ?assertMatch({ok, _}, create_bridge(Config)), + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + assert_empty_metrics(ResourceId), + ShardIt = get_shard_iterator(Config), + lists:foreach( + fun(I) -> + Payload = "payload_" ++ to_str(I), + Message = emqx_message:make(?TOPIC, Payload), + emqx:publish(Message) + end, + lists:seq(1, 10) + ), + Records = wait_records(Config, ShardIt, 10, 100, 10), + ReceivedPayloads = + lists:map(fun(Record) -> proplists:get_value(<<"Data">>, Record) end, Records), + lists:foreach( + fun(I) -> + ExpectedPayload = to_bin("payload_" ++ to_str(I)), + ?assertEqual( + {ExpectedPayload, true}, + {ExpectedPayload, lists:member(ExpectedPayload, ReceivedPayloads)} + ) + end, + lists:seq(1, 10) + ), + %% to avoid test flakiness + wait_telemetry_event(TelemetryTable, success, ResourceId), + wait_until_gauge_is(queuing, 0, 500), + wait_until_gauge_is(inflight, 0, 500), + assert_metrics( + #{ + dropped => 0, + failed => 0, + inflight => 0, + matched => 10, + queuing => 0, + retried => 0, + success => 10 + }, + ResourceId + ), + ok. + +t_publish_unhealthy(Config) -> + ResourceId = ?config(resource_id, Config), + TelemetryTable = ?config(telemetry_table, Config), + ?assertMatch({ok, _}, create_bridge(Config)), + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + assert_empty_metrics(ResourceId), + ShardIt = get_shard_iterator(Config), + Payload = <<"payload">>, + Message = emqx_message:make(?TOPIC, Payload), + delete_stream(Config), + emqx:publish(Message), + ?assertError( + {badmatch, {error, {<<"ResourceNotFoundException">>, _}}}, + wait_record(Config, ShardIt, 100, 10) + ), + %% to avoid test flakiness + wait_telemetry_event(TelemetryTable, failed, ResourceId), + wait_until_gauge_is(queuing, 0, 500), + wait_until_gauge_is(inflight, 0, 500), + assert_metrics( + #{ + dropped => 0, + failed => 1, + inflight => 0, + matched => 1, + queuing => 0, + retried => 0, + success => 0 + }, + ResourceId + ), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)), + ?assertMatch( + {ok, _, #{error := {unhealthy_target, _}}}, + emqx_resource_manager:lookup_cached(ResourceId) + ), + ok. + +t_publish_big_msg(Config) -> + ResourceId = ?config(resource_id, Config), + TelemetryTable = ?config(telemetry_table, Config), + ?assertMatch({ok, _}, create_bridge(Config)), + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + assert_empty_metrics(ResourceId), + % Maximum size is 1MB. Using 1MB + 1 here. + Payload = binary:copy(<<"a">>, 1 * 1024 * 1024 + 1), + Message = emqx_message:make(?TOPIC, Payload), + emqx:publish(Message), + %% to avoid test flakiness + wait_telemetry_event(TelemetryTable, failed, ResourceId), + wait_until_gauge_is(queuing, 0, 500), + wait_until_gauge_is(inflight, 0, 500), + assert_metrics( + #{ + dropped => 0, + failed => 1, + inflight => 0, + matched => 1, + queuing => 0, + retried => 0, + success => 0 + }, + ResourceId + ), + ok. 
+ +t_publish_connection_down(Config0) -> + Config = generate_config([{max_retries, 2} | Config0]), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = ?config(resource_id, Config), + TelemetryTable = ?config(telemetry_table, Config), + ?assertMatch({ok, _}, create_bridge(Config)), + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + ?retry( + _Sleep1 = 1_000, + _Attempts1 = 30, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + emqx_common_test_helpers:on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + assert_empty_metrics(ResourceId), + ShardIt = get_shard_iterator(Config), + Payload = <<"payload">>, + Message = emqx_message:make(?TOPIC, Payload), + Kind = + case proplists:get_value(batch_size, Config) of + 1 -> emqx_bridge_kinesis_impl_producer_sync_query; + _ -> emqx_bridge_kinesis_impl_producer_sync_batch_query + end, + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(1000), + ?wait_async_action( + emqx:publish(Message), + #{?snk_kind := Kind}, + 5_000 + ), + ct:sleep(1000) + end), + % Wait for reconnection. + ?retry( + _Sleep3 = 1_000, + _Attempts3 = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + Record = wait_record(Config, ShardIt, 2000, 10), + %% to avoid test flakiness + wait_telemetry_event(TelemetryTable, retried_success, ResourceId), + wait_until_gauge_is(queuing, 0, 500), + wait_until_gauge_is(inflight, 0, 500), + assert_metrics( + #{ + dropped => 0, + failed => 0, + inflight => 0, + matched => 1, + queuing => 0, + retried => 1, + success => 1, + retried_success => 1 + }, + ResourceId + ), + Data = proplists:get_value(<<"Data">>, Record), + ?assertEqual(Payload, Data), + ok. + +t_wrong_server(Config) -> + Name = ?config(kinesis_name, Config), + ResourceId = ?config(resource_id, Config), + Overrides = + #{ + <<"max_retries">> => 0, + <<"endpoint">> => <<"https://wrong_server:12345">>, + <<"resource_opts">> => #{ + <<"health_check_interval">> => <<"60s">> + } + }, + ?wait_async_action( + create_bridge(Config, Overrides), + #{?snk_kind := emqx_bridge_kinesis_impl_producer_start_ok}, + 30_000 + ), + ?assertEqual({error, timeout}, emqx_resource_manager:health_check(ResourceId)), + emqx_bridge_resource:stop(?BRIDGE_TYPE, Name), + emqx_bridge_resource:remove(?BRIDGE_TYPE, Name), + ok. diff --git a/apps/emqx_s3/rebar.config b/apps/emqx_s3/rebar.config index 8b0df5c34..1d64e6677 100644 --- a/apps/emqx_s3/rebar.config +++ b/apps/emqx_s3/rebar.config @@ -1,6 +1,6 @@ {deps, [ {emqx, {path, "../../apps/emqx"}}, - {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-1"}}}, + {erlcloud, {git, "https://github.com/emqx/erlcloud", {tag, "3.7.0-emqx-2"}}}, {emqx_bridge_http, {path, "../emqx_bridge_http"}} ]}. diff --git a/changes/ee/feat-11261.en.md b/changes/ee/feat-11261.en.md new file mode 100644 index 000000000..a23f319c8 --- /dev/null +++ b/changes/ee/feat-11261.en.md @@ -0,0 +1 @@ +Implemented Amazon Kinesis Data Streams producer data integration bridge . 
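For reference, the payload and partition-key templating exercised by t_publish_success_with_template above boils down to the two emqx_placeholder calls used in emqx_bridge_kinesis_impl_producer. A rough sketch with a hypothetical, flat-keyed message map (the real bridge receives the rule-engine output map instead):

    render_one(Msg) ->
        DataTokens = emqx_placeholder:preproc_tmpl(<<"${payload}">>),
        KeyTokens = emqx_placeholder:preproc_tmpl(<<"${clientid}">>),
        %% Each outgoing Kinesis record is a {Data, PartitionKey} pair.
        {
            emqx_placeholder:proc_tmpl(DataTokens, Msg),
            emqx_placeholder:proc_tmpl(KeyTokens, Msg)
        }.

    %% For example, render_one(#{payload => <<"hello">>, clientid => <<"c1">>})
    %% is expected to return {<<"hello">>, <<"c1">>} under these assumptions.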
diff --git a/mix.exs b/mix.exs index 548e32d36..2a2de9a84 100644 --- a/mix.exs +++ b/mix.exs @@ -191,7 +191,8 @@ defmodule EMQXUmbrella.MixProject do :emqx_ft, :emqx_s3, :emqx_schema_registry, - :emqx_enterprise + :emqx_enterprise, + :emqx_bridge_kinesis ]) end @@ -423,7 +424,8 @@ defmodule EMQXUmbrella.MixProject do emqx_schema_registry: :permanent, emqx_eviction_agent: :permanent, emqx_node_rebalance: :permanent, - emqx_ft: :permanent + emqx_ft: :permanent, + emqx_bridge_kinesis: :permanent ], else: [ emqx_telemetry: :permanent diff --git a/rebar.config.erl b/rebar.config.erl index 5a3ec1355..f79c430d7 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -104,6 +104,7 @@ is_community_umbrella_app("apps/emqx_ft") -> false; is_community_umbrella_app("apps/emqx_s3") -> false; is_community_umbrella_app("apps/emqx_schema_registry") -> false; is_community_umbrella_app("apps/emqx_enterprise") -> false; +is_community_umbrella_app("apps/emqx_bridge_kinesis") -> false; is_community_umbrella_app(_) -> true. is_jq_supported() -> @@ -491,7 +492,8 @@ relx_apps_per_edition(ee) -> emqx_schema_registry, emqx_eviction_agent, emqx_node_rebalance, - emqx_ft + emqx_ft, + emqx_bridge_kinesis ]; relx_apps_per_edition(ce) -> [emqx_telemetry]. diff --git a/rel/i18n/emqx_bridge_kinesis.hocon b/rel/i18n/emqx_bridge_kinesis.hocon new file mode 100644 index 000000000..42329bcd6 --- /dev/null +++ b/rel/i18n/emqx_bridge_kinesis.hocon @@ -0,0 +1,85 @@ +emqx_bridge_kinesis { + +config_enable.desc: +"""Enable or disable this bridge""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for an Amazon Kinesis bridge.""" + +desc_config.label: +"""Amazon Kinesis Bridge Configuration""" + +desc_name.desc: +"""Bridge name.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type""" + +desc_type.label: +"""Bridge Type""" + +pool_size.desc: +"""The pool size.""" + +pool_size.label: +"""Pool Size""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to Amazon Kinesis. All MQTT `PUBLISH` messages with the topic +matching the `local_topic` will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also `local_topic` is +configured, then both the data got from the rule and the MQTT messages that match `local_topic` +will be forwarded.""" + +local_topic.label: +"""Local Topic""" + +payload_template.desc: +"""The template for formatting the outgoing messages. If undefined, will send all the available context in JSON format.""" + +payload_template.label: +"""Payload template""" + +aws_access_key_id.desc: +"""Access Key ID for connecting to Amazon Kinesis.""" + +aws_access_key_id.label: +"""AWS Access Key ID""" + +aws_secret_access_key.desc: +"""AWS Secret Access Key for connecting to Amazon Kinesis.""" + +aws_secret_access_key.label: +"""AWS Secret Access Key""" + +endpoint.desc: +"""The url of Amazon Kinesis endpoint.""" + +endpoint.label: +"""Amazon Kinesis Endpoint""" + +stream_name.desc: +"""The Amazon Kinesis Stream to publish messages to.""" + +stream_name.label: +"""Amazon Kinesis Stream""" + +partition_key.desc: +"""The Amazon Kinesis Partition Key associated to published message. Placeholders in format of ${var} are supported.""" + +partition_key.label: +"""Partition key""" + +max_retries.desc: +"""Max retry times if an error occurs when sending a request.""" + +max_retries.label: +"""Max Retries""" + +} diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index e4061f7cb..785d4065d 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -219,6 +219,9 @@ for dep in ${CT_DEPS}; do hstreamdb) FILES+=( '.ci/docker-compose-file/docker-compose-hstreamdb.yaml' ) ;; + kinesis) + FILES+=( '.ci/docker-compose-file/docker-compose-kinesis.yaml' ) + ;; *) echo "unknown_ct_dependency $dep" exit 1 From 191916211bd80ac9ff27b799f584405f96699717 Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Tue, 18 Jul 2023 12:05:07 +0200 Subject: [PATCH 24/73] fix(emqx): Change incompatible DB backend message log to a warning --- bin/emqx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/emqx b/bin/emqx index 802e4b1dd..29f309792 100755 --- a/bin/emqx +++ b/bin/emqx @@ -974,7 +974,7 @@ maybe_warn_default_cookie() { ## using Mnesia DB backend. if [[ "$IS_BOOT_COMMAND" == 'yes' && "$(get_boot_config 'node.db_backend')" == "rlog" ]]; then if ! (echo -e "$COMPATIBILITY_INFO" | $GREP -q 'MNESIA_OK'); then - logerr "DB Backend is RLOG, but an incompatible OTP version has been detected. Falling back to using Mnesia DB backend." + logwarn "DB Backend is RLOG, but an incompatible OTP version has been detected. Falling back to using Mnesia DB backend." 
export EMQX_NODE__DB_BACKEND=mnesia export EMQX_NODE__DB_ROLE=core fi From 4a889c09940acb01d3b7a2c6552daaebfcda8483 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Tue, 11 Jul 2023 14:28:04 -0300 Subject: [PATCH 25/73] chore: bump ekka -> 0.15.6 https://github.com/emqx/erlang-rocksdb/releases/tag/1.8.0-emqx-1 --- apps/emqx/rebar.config | 2 +- changes/ce/feat-11291.en.md | 1 + mix.exs | 4 ++-- rebar.config | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changes/ce/feat-11291.en.md diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 2ee2b4ac5..a0b085adc 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -28,7 +28,7 @@ {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.5"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.6"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.13"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, diff --git a/changes/ce/feat-11291.en.md b/changes/ce/feat-11291.en.md new file mode 100644 index 000000000..82f49a65f --- /dev/null +++ b/changes/ce/feat-11291.en.md @@ -0,0 +1 @@ +Updated RocksDB version to 1.8.0-emqx-1 via ekka update to 0.15.6. diff --git a/mix.exs b/mix.exs index 548e32d36..c77bc7ee8 100644 --- a/mix.exs +++ b/mix.exs @@ -54,8 +54,8 @@ defmodule EMQXUmbrella.MixProject do {:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true}, {:cowboy, github: "emqx/cowboy", tag: "2.9.2", override: true}, {:esockd, github: "emqx/esockd", tag: "5.9.6", override: true}, - {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.7.2-emqx-11", override: true}, - {:ekka, github: "emqx/ekka", tag: "0.15.5", override: true}, + {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-1", override: true}, + {:ekka, github: "emqx/ekka", tag: "0.15.6", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.8", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.11", override: true}, diff --git a/rebar.config b/rebar.config index 0f6864c5e..ea13c6caf 100644 --- a/rebar.config +++ b/rebar.config @@ -61,8 +61,8 @@ , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}} , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}} , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}} - , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.7.2-emqx-11"}}} - , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.5"}}} + , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-1"}}} + , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.6"}}} , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.8"}}} , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.11"}}} From 99378355ea9fac2118a4e2af17bef65e01fbea9b Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 17 Jul 2023 16:58:33 -0300 Subject: [PATCH 26/73] ci: bump builder image -> 5.1-3 --- .ci/docker-compose-file/docker-compose-kafka.yaml | 2 +- .ci/docker-compose-file/docker-compose.yaml | 2 +- .github/workflows/build_and_push_docker_images.yaml | 2 +- 
.github/workflows/build_packages.yaml | 2 +- .github/workflows/check_deps_integrity.yaml | 2 +- .github/workflows/code_style_check.yaml | 2 +- .github/workflows/elixir_apps_check.yaml | 2 +- .github/workflows/elixir_deps_check.yaml | 2 +- .github/workflows/elixir_release.yml | 2 +- .github/workflows/performance_test.yaml | 2 +- .github/workflows/run_conf_tests.yaml | 2 +- .github/workflows/run_fvt_tests.yaml | 2 +- .github/workflows/run_relup_tests.yaml | 2 +- .github/workflows/run_test_cases.yaml | 2 +- Makefile | 2 +- deploy/docker/Dockerfile | 2 +- scripts/buildx.sh | 4 ++-- scripts/relup-test/start-relup-test-cluster.sh | 2 +- 18 files changed, 19 insertions(+), 19 deletions(-) diff --git a/.ci/docker-compose-file/docker-compose-kafka.yaml b/.ci/docker-compose-file/docker-compose-kafka.yaml index 3269865b4..18ef3991c 100644 --- a/.ci/docker-compose-file/docker-compose-kafka.yaml +++ b/.ci/docker-compose-file/docker-compose-kafka.yaml @@ -18,7 +18,7 @@ services: - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret kdc: hostname: kdc.emqx.net - image: ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04 + image: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04 container_name: kdc.emqx.net expose: - 88 # kdc diff --git a/.ci/docker-compose-file/docker-compose.yaml b/.ci/docker-compose-file/docker-compose.yaml index 3c6996a94..504358419 100644 --- a/.ci/docker-compose-file/docker-compose.yaml +++ b/.ci/docker-compose-file/docker-compose.yaml @@ -3,7 +3,7 @@ version: '3.9' services: erlang: container_name: erlang - image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04} + image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04} env_file: - conf.env environment: diff --git a/.github/workflows/build_and_push_docker_images.yaml b/.github/workflows/build_and_push_docker_images.yaml index 3dd00a9d7..cd3117b3d 100644 --- a/.github/workflows/build_and_push_docker_images.yaml +++ b/.github/workflows/build_and_push_docker_images.yaml @@ -25,7 +25,7 @@ jobs: prepare: runs-on: ubuntu-22.04 # prepare source with any OTP version, no need for a matrix - container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04" outputs: PROFILE: ${{ steps.get_profile.outputs.PROFILE }} diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 8af46df09..982c28ed3 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -21,7 +21,7 @@ on: jobs: prepare: runs-on: ubuntu-22.04 - container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04 + container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04 outputs: BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }} IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }} diff --git a/.github/workflows/check_deps_integrity.yaml b/.github/workflows/check_deps_integrity.yaml index 02b43d16d..4a079c570 100644 --- a/.github/workflows/check_deps_integrity.yaml +++ b/.github/workflows/check_deps_integrity.yaml @@ -6,7 +6,7 @@ on: jobs: check_deps_integrity: runs-on: ubuntu-22.04 - container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04 + container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04 steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/code_style_check.yaml b/.github/workflows/code_style_check.yaml index 13046b255..ff1043b81 100644 --- 
a/.github/workflows/code_style_check.yaml +++ b/.github/workflows/code_style_check.yaml @@ -5,7 +5,7 @@ on: [pull_request] jobs: code_style_check: runs-on: ubuntu-22.04 - container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04" steps: - uses: actions/checkout@v3 with: diff --git a/.github/workflows/elixir_apps_check.yaml b/.github/workflows/elixir_apps_check.yaml index 840311328..31f70690e 100644 --- a/.github/workflows/elixir_apps_check.yaml +++ b/.github/workflows/elixir_apps_check.yaml @@ -9,7 +9,7 @@ jobs: elixir_apps_check: runs-on: ubuntu-22.04 # just use the latest builder - container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04" strategy: fail-fast: false diff --git a/.github/workflows/elixir_deps_check.yaml b/.github/workflows/elixir_deps_check.yaml index aa5e3d367..a7e086bb1 100644 --- a/.github/workflows/elixir_deps_check.yaml +++ b/.github/workflows/elixir_deps_check.yaml @@ -8,7 +8,7 @@ on: jobs: elixir_deps_check: runs-on: ubuntu-22.04 - container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04 + container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04 steps: - name: Checkout diff --git a/.github/workflows/elixir_release.yml b/.github/workflows/elixir_release.yml index 8e95c6746..73807bfa0 100644 --- a/.github/workflows/elixir_release.yml +++ b/.github/workflows/elixir_release.yml @@ -17,7 +17,7 @@ jobs: profile: - emqx - emqx-enterprise - container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04 + container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04 steps: - name: Checkout uses: actions/checkout@v3 diff --git a/.github/workflows/performance_test.yaml b/.github/workflows/performance_test.yaml index 73e9dc91f..10b040271 100644 --- a/.github/workflows/performance_test.yaml +++ b/.github/workflows/performance_test.yaml @@ -23,7 +23,7 @@ jobs: prepare: runs-on: ubuntu-latest if: github.repository_owner == 'emqx' - container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu20.04 + container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04 outputs: BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }} PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }} diff --git a/.github/workflows/run_conf_tests.yaml b/.github/workflows/run_conf_tests.yaml index 91b82329b..80fe53133 100644 --- a/.github/workflows/run_conf_tests.yaml +++ b/.github/workflows/run_conf_tests.yaml @@ -26,7 +26,7 @@ jobs: profile: - emqx - emqx-enterprise - container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04" steps: - uses: AutoModality/action-clean@v1 - uses: actions/checkout@v3 diff --git a/.github/workflows/run_fvt_tests.yaml b/.github/workflows/run_fvt_tests.yaml index 64ab63042..2f5b6f5ac 100644 --- a/.github/workflows/run_fvt_tests.yaml +++ b/.github/workflows/run_fvt_tests.yaml @@ -17,7 +17,7 @@ jobs: prepare: runs-on: ubuntu-22.04 # prepare source with any OTP version, no need for a matrix - container: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-debian11 + container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11 steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/run_relup_tests.yaml b/.github/workflows/run_relup_tests.yaml index 75725e2e2..0400d0502 100644 --- a/.github/workflows/run_relup_tests.yaml +++ 
b/.github/workflows/run_relup_tests.yaml @@ -15,7 +15,7 @@ concurrency: jobs: relup_test_plan: runs-on: ubuntu-22.04 - container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04" outputs: CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }} OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }} diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index 1b0df7d42..1fcbc5d35 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -286,7 +286,7 @@ jobs: - ct - ct_docker runs-on: ubuntu-22.04 - container: "ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04" + container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04" steps: - uses: AutoModality/action-clean@v1 - uses: actions/download-artifact@v3 diff --git a/Makefile b/Makefile index bb2694d5d..c76ae0f6e 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ REBAR = $(CURDIR)/rebar3 BUILD = $(CURDIR)/build SCRIPTS = $(CURDIR)/scripts export EMQX_RELUP ?= true -export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11 +export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11 export EMQX_DEFAULT_RUNNER = debian:11-slim export EMQX_REL_FORM ?= tgz export QUICER_DOWNLOAD_FROM_RELEASE = 1 diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile index ed2bcc857..5242970d2 100644 --- a/deploy/docker/Dockerfile +++ b/deploy/docker/Dockerfile @@ -1,4 +1,4 @@ -ARG BUILD_FROM=ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11 +ARG BUILD_FROM=ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11 ARG RUN_FROM=debian:11-slim FROM ${BUILD_FROM} AS builder diff --git a/scripts/buildx.sh b/scripts/buildx.sh index 988d0afb1..462ab6612 100755 --- a/scripts/buildx.sh +++ b/scripts/buildx.sh @@ -9,7 +9,7 @@ ## example: ## ./scripts/buildx.sh --profile emqx --pkgtype tgz --arch arm64 \ -## --builder ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11 +## --builder ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11 set -euo pipefail @@ -24,7 +24,7 @@ help() { echo "--arch amd64|arm64: Target arch to build the EMQX package for" echo "--src_dir : EMQX source code in this dir, default to PWD" echo "--builder : Builder image to pull" - echo " E.g. ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-debian11" + echo " E.g. 
ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11" } die() { diff --git a/scripts/relup-test/start-relup-test-cluster.sh b/scripts/relup-test/start-relup-test-cluster.sh index fea8b546e..9cc0eaffe 100755 --- a/scripts/relup-test/start-relup-test-cluster.sh +++ b/scripts/relup-test/start-relup-test-cluster.sh @@ -22,7 +22,7 @@ WEBHOOK="webhook.$NET" BENCH="bench.$NET" COOKIE='this-is-a-secret' ## Erlang image is needed to run webhook server and emqtt-bench -ERLANG_IMAGE="ghcr.io/emqx/emqx-builder/5.1-0:1.14.5-25.3.2-1-ubuntu20.04" +ERLANG_IMAGE="ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04" # builder has emqtt-bench installed BENCH_IMAGE="$ERLANG_IMAGE" From 143cdb9e440289bc4941de41967f895103fc2835 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Tue, 18 Jul 2023 12:49:52 +0200 Subject: [PATCH 27/73] chore: bump jq to v0.3.10 --- mix.exs | 2 +- rebar.config.erl | 2 +- scripts/macos-sign-binaries.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mix.exs b/mix.exs index 548e32d36..c2bcbcd6b 100644 --- a/mix.exs +++ b/mix.exs @@ -842,7 +842,7 @@ defmodule EMQXUmbrella.MixProject do defp jq_dep() do if enable_jq?(), - do: [{:jq, github: "emqx/jq", tag: "v0.3.9", override: true}], + do: [{:jq, github: "emqx/jq", tag: "v0.3.10", override: true}], else: [] end diff --git a/rebar.config.erl b/rebar.config.erl index 5a3ec1355..9be3c68e2 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -42,7 +42,7 @@ quicer() -> {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.114"}}}. jq() -> - {jq, {git, "https://github.com/emqx/jq", {tag, "v0.3.9"}}}. + {jq, {git, "https://github.com/emqx/jq", {tag, "v0.3.10"}}}. deps(Config) -> {deps, OldDeps} = lists:keyfind(deps, 1, Config), diff --git a/scripts/macos-sign-binaries.sh b/scripts/macos-sign-binaries.sh index 5fffa925e..2e348c6c8 100755 --- a/scripts/macos-sign-binaries.sh +++ b/scripts/macos-sign-binaries.sh @@ -57,7 +57,7 @@ codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=r codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime \ "${REL_DIR}"/lib/os_mon-*/priv/bin/{cpu_sup,memsup} codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime \ - "${REL_DIR}"/lib/jq-*/priv/{jq_nif1.so,libjq.1.dylib,libonig.4.dylib,erlang_jq_port} + "${REL_DIR}"/lib/jq-*/priv/{jq_nif1.so,libjq.1.dylib,libonig.5.dylib,erlang_jq_port} # other files from runtime and dependencies for f in \ asn1rt_nif.so \ From a2cfb95780414452c144357e37ff1adf34dc747a Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Tue, 18 Jul 2023 12:55:01 +0200 Subject: [PATCH 28/73] chore: add changelog entry --- changes/ce/feat-11290.en.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/ce/feat-11290.en.md diff --git a/changes/ce/feat-11290.en.md b/changes/ce/feat-11290.en.md new file mode 100644 index 000000000..f5f3ae26f --- /dev/null +++ b/changes/ce/feat-11290.en.md @@ -0,0 +1 @@ +Updated `jq` dependency to version 0.3.10 which includes `oniguruma` library update to version 6.9.8 with few minor security fixes. From 0a00c392822ac30e65d5b16cffb93a7b5c7d474d Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Tue, 18 Jul 2023 15:43:09 +0200 Subject: [PATCH 29/73] chore(ps): phase out existing session persistence mechanism * It's not really working currently in a cluster setup. * It's not really working well. * It would be much easier to integrate `emqx_durable_storage` w/o the need to support existing solution. 
--- apps/emqx/src/emqx_broker.erl | 1 - apps/emqx/src/emqx_channel.erl | 90 ++++++------------- apps/emqx/src/emqx_cm.erl | 72 +++++---------- apps/emqx/src/emqx_session.erl | 32 +++++-- .../test/emqx_persistent_session_SUITE.erl | 5 +- .../emqx_eviction_agent_channel_SUITE.erl | 8 +- 6 files changed, 84 insertions(+), 124 deletions(-) diff --git a/apps/emqx/src/emqx_broker.erl b/apps/emqx/src/emqx_broker.erl index 859f6fc91..afa6dffe5 100644 --- a/apps/emqx/src/emqx_broker.erl +++ b/apps/emqx/src/emqx_broker.erl @@ -224,7 +224,6 @@ publish(Msg) when is_record(Msg, message) -> }), []; Msg1 = #message{topic = Topic} -> - emqx_persistent_session:persist_message(Msg1), _ = emqx_persistent_session_ds:persist_message(Msg1), route(aggre(emqx_router:match_routes(Topic)), delivery(Msg1)) end. diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index 5e594d35f..d879e5a2d 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -61,8 +61,7 @@ %% Export for emqx_channel implementations -export([ - maybe_nack/1, - maybe_mark_as_delivered/2 + maybe_nack/1 ]). %% Exports for CT @@ -199,11 +198,6 @@ info(timers, #channel{timers = Timers}) -> set_conn_state(ConnState, Channel) -> Channel#channel{conn_state = ConnState}. -set_session(Session, Channel = #channel{conninfo = ConnInfo, clientinfo = ClientInfo}) -> - %% Assume that this is also an updated session. Allow side effect. - Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session), - Channel#channel{session = Session1}. - -spec stats(channel()) -> emqx_types:stats(). stats(#channel{session = undefined}) -> emqx_pd:get_counters(?CHANNEL_METRICS); @@ -417,10 +411,10 @@ handle_in( case emqx_session:puback(ClientInfo, PacketId, Session) of {ok, Msg, NSession} -> ok = after_message_acked(ClientInfo, Msg, Properties), - {ok, set_session(NSession, Channel)}; + {ok, Channel#channel{session = NSession}}; {ok, Msg, Publishes, NSession} -> ok = after_message_acked(ClientInfo, Msg, Properties), - handle_out(publish, Publishes, set_session(NSession, Channel)); + handle_out(publish, Publishes, Channel#channel{session = NSession}); {error, ?RC_PACKET_IDENTIFIER_IN_USE} -> ?SLOG(warning, #{msg => "puback_packetId_inuse", packetId => PacketId}), ok = emqx_metrics:inc('packets.puback.inuse'), @@ -438,7 +432,7 @@ handle_in( case emqx_session:pubrec(ClientInfo, PacketId, Session) of {ok, Msg, NSession} -> ok = after_message_acked(ClientInfo, Msg, Properties), - NChannel = set_session(NSession, Channel), + NChannel = Channel#channel{session = NSession}, handle_out(pubrel, {PacketId, ?RC_SUCCESS}, NChannel); {error, RC = ?RC_PACKET_IDENTIFIER_IN_USE} -> ?SLOG(warning, #{msg => "pubrec_packetId_inuse", packetId => PacketId}), @@ -458,7 +452,7 @@ handle_in( ) -> case emqx_session:pubrel(ClientInfo, PacketId, Session) of {ok, NSession} -> - NChannel = set_session(NSession, Channel), + NChannel = Channel#channel{session = NSession}, handle_out(pubcomp, {PacketId, ?RC_SUCCESS}, NChannel); {error, RC = ?RC_PACKET_IDENTIFIER_NOT_FOUND} -> ?SLOG(warning, #{msg => "pubrec_packetId_not_found", packetId => PacketId}), @@ -473,9 +467,9 @@ handle_in( ) -> case emqx_session:pubcomp(ClientInfo, PacketId, Session) of {ok, NSession} -> - {ok, set_session(NSession, Channel)}; + {ok, Channel#channel{session = NSession}}; {ok, Publishes, NSession} -> - handle_out(publish, Publishes, set_session(NSession, Channel)); + handle_out(publish, Publishes, Channel#channel{session = NSession}); {error, ?RC_PACKET_IDENTIFIER_IN_USE} -> 
ok = emqx_metrics:inc('packets.pubcomp.inuse'), {ok, Channel}; @@ -734,7 +728,7 @@ do_publish( case emqx_session:publish(ClientInfo, PacketId, Msg, Session) of {ok, PubRes, NSession} -> RC = pubrec_reason_code(PubRes), - NChannel0 = set_session(NSession, Channel), + NChannel0 = Channel#channel{session = NSession}, NChannel1 = ensure_timer(await_timer, NChannel0), NChannel2 = ensure_quota(PubRes, NChannel1), handle_out(pubrec, {PacketId, RC}, NChannel2); @@ -830,7 +824,7 @@ do_subscribe( NSubOpts = enrich_subopts(maps:merge(?DEFAULT_SUBOPTS, SubOpts), Channel), case emqx_session:subscribe(ClientInfo, NTopicFilter, NSubOpts, Session) of {ok, NSession} -> - {QoS, set_session(NSession, Channel)}; + {QoS, Channel#channel{session = NSession}}; {error, RC} -> ?SLOG( warning, @@ -869,7 +863,7 @@ do_unsubscribe( TopicFilter1 = emqx_mountpoint:mount(MountPoint, TopicFilter), case emqx_session:unsubscribe(ClientInfo, TopicFilter1, SubOpts, Session) of {ok, NSession} -> - {?RC_SUCCESS, set_session(NSession, Channel)}; + {?RC_SUCCESS, Channel#channel{session = NSession}}; {error, RC} -> {RC, Channel} end. @@ -898,7 +892,7 @@ process_disconnect(ReasonCode, Properties, Channel) -> maybe_update_expiry_interval( #{'Session-Expiry-Interval' := Interval}, - Channel = #channel{conninfo = ConnInfo, clientinfo = ClientInfo} + Channel = #channel{conninfo = ConnInfo} ) -> EI = timer:seconds(Interval), OldEI = maps:get(expiry_interval, ConnInfo, 0), @@ -907,12 +901,11 @@ maybe_update_expiry_interval( Channel; false -> NChannel = Channel#channel{conninfo = ConnInfo#{expiry_interval => EI}}, - ClientID = maps:get(clientid, ClientInfo, undefined), %% Check if the client turns off persistence (turning it on is disallowed) case EI =:= 0 andalso OldEI > 0 of true -> - S = emqx_persistent_session:discard(ClientID, NChannel#channel.session), - set_session(S, NChannel); + NSession = emqx_session:unpersist(NChannel#channel.session), + NChannel#channel{session = NSession}; false -> NChannel end @@ -956,9 +949,7 @@ handle_deliver( Delivers1 = maybe_nack(Delivers), Delivers2 = emqx_session:ignore_local(ClientInfo, Delivers1, ClientId, Session), NSession = emqx_session:enqueue(ClientInfo, Delivers2, Session), - NChannel = set_session(NSession, Channel), - %% We consider queued/dropped messages as delivered since they are now in the session state. - maybe_mark_as_delivered(Session, Delivers), + NChannel = Channel#channel{session = NSession}, {ok, NChannel}; handle_deliver( Delivers, @@ -976,11 +967,10 @@ handle_deliver( ) of {ok, Publishes, NSession} -> - NChannel = set_session(NSession, Channel), - maybe_mark_as_delivered(NSession, Delivers), + NChannel = Channel#channel{session = NSession}, handle_out(publish, Publishes, ensure_timer(retry_timer, NChannel)); {ok, NSession} -> - {ok, set_session(NSession, Channel)} + {ok, Channel#channel{session = NSession}} end. %% Nack delivers from shared subscription @@ -996,15 +986,6 @@ not_nacked({deliver, _Topic, Msg}) -> true end. -maybe_mark_as_delivered(Session, Delivers) -> - case emqx_session:info(is_persistent, Session) of - false -> - skip; - true -> - SessionID = emqx_session:info(id, Session), - emqx_persistent_session:mark_as_delivered(SessionID, Delivers) - end. 
- %%-------------------------------------------------------------------- %% Handle outgoing packet %%-------------------------------------------------------------------- @@ -1096,11 +1077,11 @@ return_connack(AckPacket, Channel) -> ignore -> {ok, Replies, Channel}; {ok, Publishes, NSession} -> - NChannel0 = Channel#channel{ + NChannel1 = Channel#channel{ resuming = false, - pendings = [] + pendings = [], + session = NSession }, - NChannel1 = set_session(NSession, NChannel0), {Packets, NChannel2} = do_deliver(Publishes, NChannel1), Outgoing = [{outgoing, Packets} || length(Packets) > 0], {ok, Replies ++ Outgoing, NChannel2} @@ -1345,9 +1326,10 @@ handle_timeout( ) -> case emqx_session:retry(ClientInfo, Session) of {ok, NSession} -> - {ok, clean_timer(retry_timer, set_session(NSession, Channel))}; + NChannel = Channel#channel{session = NSession}, + {ok, clean_timer(retry_timer, NChannel)}; {ok, Publishes, Timeout, NSession} -> - NChannel = set_session(NSession, Channel), + NChannel = Channel#channel{session = NSession}, handle_out(publish, Publishes, reset_timer(retry_timer, Timeout, NChannel)) end; handle_timeout( @@ -1363,9 +1345,11 @@ handle_timeout( ) -> case emqx_session:expire(ClientInfo, awaiting_rel, Session) of {ok, NSession} -> - {ok, clean_timer(await_timer, set_session(NSession, Channel))}; + NChannel = Channel#channel{session = NSession}, + {ok, clean_timer(await_timer, NChannel)}; {ok, Timeout, NSession} -> - {ok, reset_timer(await_timer, Timeout, set_session(NSession, Channel))} + NChannel = Channel#channel{session = NSession}, + {ok, reset_timer(await_timer, Timeout, NChannel)} end; handle_timeout(_TRef, expire_session, Channel) -> shutdown(expired, Channel); @@ -1453,25 +1437,11 @@ terminate(Reason, Channel = #channel{clientinfo = ClientInfo, will_msg = WillMsg %% if will_msg still exists when the session is terminated, it %% must be published immediately. WillMsg =/= undefined andalso publish_will_msg(ClientInfo, WillMsg), - (Reason =:= expired) andalso persist_if_session(Channel), run_terminate_hook(Reason, Channel). -persist_if_session(#channel{session = Session} = Channel) -> - case emqx_session:is_session(Session) of - true -> - _ = emqx_persistent_session:persist( - Channel#channel.clientinfo, - Channel#channel.conninfo, - Channel#channel.session - ), - ok; - false -> - ok - end. - -run_terminate_hook(_Reason, #channel{session = undefined} = _Channel) -> +run_terminate_hook(_Reason, #channel{session = undefined}) -> ok; -run_terminate_hook(Reason, #channel{clientinfo = ClientInfo, session = Session} = _Channel) -> +run_terminate_hook(Reason, #channel{clientinfo = ClientInfo, session = Session}) -> emqx_session:terminate(ClientInfo, Reason, Session). %%-------------------------------------------------------------------- @@ -2096,11 +2066,9 @@ maybe_resume_session(#channel{ session = Session, resuming = true, pendings = Pendings, - clientinfo = #{clientid := ClientId} = ClientInfo + clientinfo = ClientInfo }) -> {ok, Publishes, Session1} = emqx_session:replay(ClientInfo, Session), - %% We consider queued/dropped messages as delivered since they are now in the session state. 
- emqx_persistent_session:mark_as_delivered(ClientId, Pendings), case emqx_session:deliver(ClientInfo, Pendings, Session1) of {ok, Session2} -> {ok, Publishes, Session2}; diff --git a/apps/emqx/src/emqx_cm.erl b/apps/emqx/src/emqx_cm.erl index c193cea44..6d18fef34 100644 --- a/apps/emqx/src/emqx_cm.erl +++ b/apps/emqx/src/emqx_cm.erl @@ -277,65 +277,24 @@ open_session(true, ClientInfo = #{clientid := ClientId}, ConnInfo) -> Self = self(), CleanStart = fun(_) -> ok = discard_session(ClientId), - ok = emqx_persistent_session:discard_if_present(ClientId), - Session = create_session(ClientInfo, ConnInfo), - Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session), - register_channel(ClientId, Self, ConnInfo), - {ok, #{session => Session1, present => false}} + ok = emqx_session:destroy(ClientId), + create_register_session(ClientInfo, ConnInfo, Self) end, emqx_cm_locker:trans(ClientId, CleanStart); open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) -> Self = self(), ResumeStart = fun(_) -> - CreateSess = - fun() -> - Session = create_session(ClientInfo, ConnInfo), - Session1 = emqx_persistent_session:persist( - ClientInfo, ConnInfo, Session - ), - register_channel(ClientId, Self, ConnInfo), - {ok, #{session => Session1, present => false}} - end, case takeover_session(ClientId) of - {persistent, Session} -> - %% This is a persistent session without a managing process. - {Session1, Pendings} = - emqx_persistent_session:resume(ClientInfo, ConnInfo, Session), - register_channel(ClientId, Self, ConnInfo), - - {ok, #{ - session => clean_session(Session1), - present => true, - pendings => clean_pendings(Pendings) - }}; {living, ConnMod, ChanPid, Session} -> ok = emqx_session:resume(ClientInfo, Session), case wrap_rpc(emqx_cm_proto_v2:takeover_finish(ConnMod, ChanPid)) of {ok, Pendings} -> - Session1 = emqx_persistent_session:persist( - ClientInfo, ConnInfo, Session - ), - register_channel(ClientId, Self, ConnInfo), - {ok, #{ - session => clean_session(Session1), - present => true, - pendings => clean_pendings(Pendings) - }}; + clean_register_session(Session, Pendings, ClientInfo, ConnInfo, Self); {error, _} -> - CreateSess() + create_register_session(ClientInfo, ConnInfo, Self) end; - {expired, OldSession} -> - _ = emqx_persistent_session:discard(ClientId, OldSession), - Session = create_session(ClientInfo, ConnInfo), - Session1 = emqx_persistent_session:persist( - ClientInfo, - ConnInfo, - Session - ), - register_channel(ClientId, Self, ConnInfo), - {ok, #{session => Session1, present => false}}; none -> - CreateSess() + create_register_session(ClientInfo, ConnInfo, Self) end end, emqx_cm_locker:trans(ClientId, ResumeStart). @@ -347,6 +306,19 @@ create_session(ClientInfo, ConnInfo) -> ok = emqx_hooks:run('session.created', [ClientInfo, emqx_session:info(Session)]), Session. +create_register_session(ClientInfo = #{clientid := ClientId}, ConnInfo, ChanPid) -> + Session = create_session(ClientInfo, ConnInfo), + ok = register_channel(ClientId, ChanPid, ConnInfo), + {ok, #{session => Session, present => false}}. + +clean_register_session(Session, Pendings, #{clientid := ClientId}, ConnInfo, ChanPid) -> + ok = register_channel(ClientId, ChanPid, ConnInfo), + {ok, #{ + session => clean_session(Session), + present => true, + pendings => clean_pendings(Pendings) + }}. 
+ get_session_confs(#{zone := Zone, clientid := ClientId}, #{ receive_maximum := MaxInflight, expiry_interval := EI }) -> @@ -385,7 +357,7 @@ get_mqtt_conf(Zone, Key) -> takeover_session(ClientId) -> case lookup_channels(ClientId) of [] -> - emqx_persistent_session:lookup(ClientId); + emqx_session:lookup(ClientId); [ChanPid] -> takeover_session(ClientId, ChanPid); ChanPids -> @@ -417,16 +389,16 @@ takeover_session(ClientId, Pid) -> %% request_stepdown/3 R == unexpected_exception -> - emqx_persistent_session:lookup(ClientId); + emqx_session:lookup(ClientId); % rpc_call/3 _:{'EXIT', {noproc, _}} -> - emqx_persistent_session:lookup(ClientId) + emqx_session:lookup(ClientId) end. do_takeover_session(ClientId, ChanPid) when node(ChanPid) == node() -> case get_chann_conn_mod(ClientId, ChanPid) of undefined -> - emqx_persistent_session:lookup(ClientId); + emqx_session:lookup(ClientId); ConnMod when is_atom(ConnMod) -> case request_stepdown({takeover, 'begin'}, ConnMod, ChanPid) of {ok, Session} -> diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index 25bee629e..db0059709 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -47,13 +47,18 @@ -include("emqx_mqtt.hrl"). -include("logger.hrl"). -include("types.hrl"). --include_lib("snabbkaffe/include/snabbkaffe.hrl"). -ifdef(TEST). -compile(export_all). -compile(nowarn_export_all). -endif. +-export([ + lookup/1, + destroy/1, + unpersist/1 +]). + -export([init/1]). -export([ @@ -226,6 +231,23 @@ init(Opts) -> created_at = erlang:system_time(millisecond) }. +-spec lookup(emqx_types:clientid()) -> none. +lookup(_ClientId) -> + % NOTE + % This is a stub. This session impl has no backing store, thus always `none`. + none. + +-spec destroy(emqx_types:clientid()) -> ok. +destroy(_ClientId) -> + % NOTE + % This is a stub. This session impl has no backing store, thus always `ok`. + ok. + +-spec unpersist(session()) -> session(). +unpersist(Session) -> + ok = destroy(Session#session.clientid), + Session#session{is_persistent = false}. 
+ %%-------------------------------------------------------------------- %% Info, Stats %%-------------------------------------------------------------------- @@ -242,6 +264,8 @@ info(Keys, Session) when is_list(Keys) -> [{Key, info(Key, Session)} || Key <- Keys]; info(id, #session{id = Id}) -> Id; +info(clientid, #session{clientid = ClientId}) -> + ClientId; info(is_persistent, #session{is_persistent = Bool}) -> Bool; info(subscriptions, #session{subscriptions = Subs}) -> @@ -321,13 +345,12 @@ subscribe( ClientInfo = #{clientid := ClientId}, TopicFilter, SubOpts, - Session = #session{id = SessionID, is_persistent = IsPS, subscriptions = Subs} + Session = #session{subscriptions = Subs} ) -> IsNew = not maps:is_key(TopicFilter, Subs), case IsNew andalso is_subscriptions_full(Session) of false -> ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts), - ok = emqx_persistent_session:add_subscription(TopicFilter, SessionID, IsPS), ok = emqx_hooks:run( 'session.subscribed', [ClientInfo, TopicFilter, SubOpts#{is_new => IsNew}] @@ -355,12 +378,11 @@ unsubscribe( ClientInfo, TopicFilter, UnSubOpts, - Session = #session{id = SessionID, subscriptions = Subs, is_persistent = IsPS} + Session = #session{subscriptions = Subs} ) -> case maps:find(TopicFilter, Subs) of {ok, SubOpts} -> ok = emqx_broker:unsubscribe(TopicFilter), - ok = emqx_persistent_session:remove_subscription(TopicFilter, SessionID, IsPS), ok = emqx_hooks:run( 'session.unsubscribed', [ClientInfo, TopicFilter, maps:merge(SubOpts, UnSubOpts)] diff --git a/apps/emqx/test/emqx_persistent_session_SUITE.erl b/apps/emqx/test/emqx_persistent_session_SUITE.erl index cc583c632..07cfabc70 100644 --- a/apps/emqx/test/emqx_persistent_session_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_session_SUITE.erl @@ -31,7 +31,10 @@ all() -> [ - {group, persistent_store_enabled}, + % NOTE + % Tests are disabled while existing session persistence impl is being + % phased out. + % {group, persistent_store_enabled}, {group, persistent_store_disabled} ]. diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl index 3fd21f389..936df0b0c 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl @@ -30,12 +30,8 @@ init_per_suite(Config) -> end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([emqx_eviction_agent, emqx_conf]). -init_per_testcase(t_persistence, Config) -> - emqx_config:put([persistent_session_store, enabled], true), - {ok, _} = emqx_persistent_session_sup:start_link(), - emqx_persistent_session:init_db_backend(), - ?assert(emqx_persistent_session:is_store_enabled()), - Config; +init_per_testcase(t_persistence, _Config) -> + {skip, "Existing session persistence implementation is being phased out"}; init_per_testcase(_TestCase, Config) -> Config. 
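The store-API stubs introduced above (`lookup/1`, `destroy/1`, `unpersist/1`) give callers such as `emqx_cm` a fixed contract until a durable-storage-backed session store replaces them. A minimal sketch of that contract, written as a hypothetical test case — the function name and the assert include are assumptions, not part of the patch:

    %% Sketch only; assumes -include_lib("stdlib/include/assert.hrl") in the suite.
    t_stubbed_session_store(_Config) ->
        %% lookup/1 has no backing store, so it always reports that no session exists
        ?assertEqual(none, emqx_session:lookup(<<"any-client-id">>)),
        %% destroy/1 is likewise a no-op that always succeeds
        ?assertEqual(ok, emqx_session:destroy(<<"any-client-id">>)).
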
From 3ba6d3451170f2e4649ee4d6e47b065f27c8b180 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Tue, 18 Jul 2023 15:47:12 +0200 Subject: [PATCH 30/73] chore(eviction): phase out session persistence concerns --- .../src/emqx_eviction_agent.app.src | 2 +- .../src/emqx_eviction_agent_channel.erl | 12 ++---------- .../test/emqx_eviction_agent_channel_SUITE.erl | 3 --- 3 files changed, 3 insertions(+), 14 deletions(-) diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src b/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src index 7e6cf5b95..f9f6334c3 100644 --- a/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src @@ -1,6 +1,6 @@ {application, emqx_eviction_agent, [ {description, "EMQX Eviction Agent"}, - {vsn, "5.0.1"}, + {vsn, "5.1.0"}, {registered, [ emqx_eviction_agent_sup, emqx_eviction_agent, diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl index 1369ee969..7d0bc7528 100644 --- a/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl @@ -165,9 +165,8 @@ handle_cast(Msg, Channel) -> ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), {noreply, Channel}. -terminate(Reason, #{conninfo := ConnInfo, clientinfo := ClientInfo, session := Session} = Channel) -> +terminate(Reason, #{clientinfo := ClientInfo, session := Session} = Channel) -> ok = cancel_expiry_timer(Channel), - (Reason =:= expired) andalso emqx_persistent_session:persist(ClientInfo, ConnInfo, Session), emqx_session:terminate(ClientInfo, Reason, Session). code_change(_OldVsn, Channel, _Extra) -> @@ -205,10 +204,7 @@ handle_deliver( Delivers1 = emqx_channel:maybe_nack(Delivers), Delivers2 = emqx_session:ignore_local(ClientInfo, Delivers1, ClientId, Session), NSession = emqx_session:enqueue(ClientInfo, Delivers2, Session), - NChannel = persist(NSession, Channel), - %% We consider queued/dropped messages as delivered since they are now in the session state. - emqx_channel:maybe_mark_as_delivered(Session, Delivers), - NChannel. + Channel#{session := NSession}. cancel_expiry_timer(#{expiry_timer := TRef}) when is_reference(TRef) -> _ = erlang:cancel_timer(TRef), @@ -334,10 +330,6 @@ channel(ConnInfo, ClientInfo) -> pendings => [] }. -persist(Session, #{clientinfo := ClientInfo, conninfo := ConnInfo} = Channel) -> - Session1 = emqx_persistent_session:persist(ClientInfo, ConnInfo, Session), - Channel#{session => Session1}. - info(Channel) -> #{ conninfo => maps:get(conninfo, Channel, undefined), diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl index 936df0b0c..764306ce8 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl @@ -36,9 +36,6 @@ init_per_testcase(_TestCase, Config) -> Config. end_per_testcase(t_persistence, Config) -> - emqx_config:put([persistent_session_store, enabled], false), - emqx_persistent_session:init_db_backend(), - ?assertNot(emqx_persistent_session:is_store_enabled()), Config; end_per_testcase(_TestCase, _Config) -> ok. 
From cb1e0cdc8910a8cd62601517374f01745fd85c84 Mon Sep 17 00:00:00 2001 From: Thales Macedo Garitezi Date: Mon, 17 Jul 2023 17:37:55 -0300 Subject: [PATCH 31/73] test(http): attempt to stabilize flaky tests --- .../test/emqx_bridge_http_SUITE.erl | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/apps/emqx_bridge_http/test/emqx_bridge_http_SUITE.erl b/apps/emqx_bridge_http/test/emqx_bridge_http_SUITE.erl index 30a01cf6a..6877c6c2e 100644 --- a/apps/emqx_bridge_http/test/emqx_bridge_http_SUITE.erl +++ b/apps/emqx_bridge_http/test/emqx_bridge_http_SUITE.erl @@ -208,6 +208,7 @@ bridge_async_config(#{port := Port} = Config) -> ConnectTimeout = maps:get(connect_timeout, Config, "1s"), RequestTimeout = maps:get(request_timeout, Config, "10s"), ResumeInterval = maps:get(resume_interval, Config, "1s"), + HealthCheckInterval = maps:get(health_check_interval, Config, "200ms"), ResourceRequestTTL = maps:get(resource_request_ttl, Config, "infinity"), LocalTopic = case maps:find(local_topic, Config) of @@ -232,7 +233,7 @@ bridge_async_config(#{port := Port} = Config) -> " body = \"${id}\"\n" " resource_opts {\n" " inflight_window = 100\n" - " health_check_interval = \"200ms\"\n" + " health_check_interval = \"~s\"\n" " max_buffer_bytes = \"1GB\"\n" " query_mode = \"~s\"\n" " request_ttl = \"~p\"\n" @@ -254,6 +255,7 @@ bridge_async_config(#{port := Port} = Config) -> LocalTopic, PoolSize, RequestTimeout, + HealthCheckInterval, QueryMode, ResourceRequestTTL, ResumeInterval @@ -350,19 +352,27 @@ t_send_async_connection_timeout(Config) -> port => Port, pool_size => 1, query_mode => "async", - connect_timeout => integer_to_list(ResponseDelayMS * 2) ++ "s", + connect_timeout => integer_to_list(ResponseDelayMS * 2) ++ "ms", request_timeout => "10s", + resume_interval => "200ms", + health_check_interval => "200ms", resource_request_ttl => "infinity" }), + ResourceId = emqx_bridge_resource:resource_id(BridgeID), + ?retry( + _Interval0 = 200, + _NAttempts0 = 20, + ?assertMatch({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), NumberOfMessagesToSend = 10, [ emqx_bridge:send_message(BridgeID, #{<<"id">> => Id}) || Id <- lists:seq(1, NumberOfMessagesToSend) ], - %% Make sure server recive all messages + %% Make sure server receives all messages ct:pal("Sent messages\n"), MessageIDs = maps:from_keys(lists:seq(1, NumberOfMessagesToSend), void), - receive_request_notifications(MessageIDs, ResponseDelayMS), + receive_request_notifications(MessageIDs, ResponseDelayMS, []), ok. t_async_free_retries(Config) -> @@ -569,15 +579,16 @@ do_t_async_retries(TestContext, Error, Fn) -> ), ok. -receive_request_notifications(MessageIDs, _ResponseDelay) when map_size(MessageIDs) =:= 0 -> +receive_request_notifications(MessageIDs, _ResponseDelay, _Acc) when map_size(MessageIDs) =:= 0 -> ok; -receive_request_notifications(MessageIDs, ResponseDelay) -> +receive_request_notifications(MessageIDs, ResponseDelay, Acc) -> receive {http_server, received, Req} -> RemainingMessageIDs = remove_message_id(MessageIDs, Req), - receive_request_notifications(RemainingMessageIDs, ResponseDelay) + receive_request_notifications(RemainingMessageIDs, ResponseDelay, [Req | Acc]) after (30 * 1000) -> - ct:pal("Waited to long time but did not get any message\n"), + ct:pal("Waited a long time but did not get any message"), + ct:pal("Messages received so far:\n ~p", [Acc]), ct:fail("All requests did not reach server at least once") end. 
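The patch above also threads two new optional keys, `health_check_interval` and `resume_interval`, through `bridge_async_config/1`. A minimal sketch of a suite-local helper exercising them — the helper name and all duration values are illustrative assumptions, and only keys visible in the diff are set:

    %% Hypothetical helper inside the same suite module; Port is assumed to come
    %% from the suite's HTTP server setup.
    fast_health_check_bridge_config(Port) ->
        bridge_async_config(#{
            port => Port,
            pool_size => 1,
            query_mode => "async",
            connect_timeout => "500ms",
            request_timeout => "10s",
            health_check_interval => "200ms",
            resume_interval => "200ms",
            resource_request_ttl => "infinity"
        }).
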
From f541fc3e3def337633b0e84e2cb318b46cd17a51 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Thu, 13 Jul 2023 08:53:14 +0200 Subject: [PATCH 32/73] feat: build debian12 packages --- .github/workflows/build_and_push_docker_images.yaml | 2 +- .github/workflows/build_packages.yaml | 7 ++++--- .github/workflows/build_packages_cron.yaml | 2 +- .github/workflows/build_slim_packages.yaml | 2 +- .github/workflows/release.yaml | 2 ++ .github/workflows/run_emqx_app_tests.yaml | 2 +- .github/workflows/run_fvt_tests.yaml | 4 ++-- .github/workflows/run_test_cases.yaml | 4 ++-- 8 files changed, 14 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build_and_push_docker_images.yaml b/.github/workflows/build_and_push_docker_images.yaml index cd3117b3d..e8066b8bc 100644 --- a/.github/workflows/build_and_push_docker_images.yaml +++ b/.github/workflows/build_and_push_docker_images.yaml @@ -120,7 +120,7 @@ jobs: # NOTE: 'otp' and 'elixir' are to configure emqx-builder image # only support latest otp and elixir, not a matrix builder: - - 5.1-1 # update to latest + - 5.1-3 # update to latest otp: - 25.3.2-1 elixir: diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 982c28ed3..7985818da 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -181,6 +181,7 @@ jobs: - ubuntu22.04 - ubuntu20.04 - ubuntu18.04 + - debian12 - debian11 - debian10 - el9 @@ -192,7 +193,7 @@ jobs: - aws-arm64 - ubuntu-22.04 builder: - - 5.1-1 + - 5.1-3 elixir: - 1.14.5 with_elixir: @@ -208,7 +209,7 @@ jobs: arch: amd64 os: ubuntu22.04 build_machine: ubuntu-22.04 - builder: 5.1-1 + builder: 5.1-3 elixir: 1.14.5 with_elixir: 'yes' - profile: emqx @@ -216,7 +217,7 @@ jobs: arch: amd64 os: amzn2 build_machine: ubuntu-22.04 - builder: 5.1-1 + builder: 5.1-3 elixir: 1.14.5 with_elixir: 'yes' diff --git a/.github/workflows/build_packages_cron.yaml b/.github/workflows/build_packages_cron.yaml index 3f8e728e0..09f68c256 100644 --- a/.github/workflows/build_packages_cron.yaml +++ b/.github/workflows/build_packages_cron.yaml @@ -32,7 +32,7 @@ jobs: - debian10 - amzn2023 builder: - - 5.1-1 + - 5.1-3 elixir: - 1.14.5 diff --git a/.github/workflows/build_slim_packages.yaml b/.github/workflows/build_slim_packages.yaml index 50bb83451..a955b4a9b 100644 --- a/.github/workflows/build_slim_packages.yaml +++ b/.github/workflows/build_slim_packages.yaml @@ -35,7 +35,7 @@ jobs: - ["emqx-enterprise", "25.3.2-1", "amzn2023", "erlang"] - ["emqx-enterprise", "25.3.2-1", "ubuntu20.04", "erlang"] builder: - - 5.1-1 + - 5.1-3 elixir: - '1.14.5' diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index fd097c546..1945caab0 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -87,6 +87,8 @@ jobs: push "debian/buster" "packages/$PROFILE-$VERSION-debian10-arm64.deb" push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-amd64.deb" push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-arm64.deb" + push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-amd64.deb" + push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-arm64.deb" push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-amd64.deb" push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-arm64.deb" push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-amd64.deb" diff --git a/.github/workflows/run_emqx_app_tests.yaml b/.github/workflows/run_emqx_app_tests.yaml index ddb29122f..40a630e76 100644 --- 
a/.github/workflows/run_emqx_app_tests.yaml +++ b/.github/workflows/run_emqx_app_tests.yaml @@ -12,7 +12,7 @@ jobs: strategy: matrix: builder: - - 5.1-1 + - 5.1-3 otp: - 25.3.2-1 # no need to use more than 1 version of Elixir, since tests diff --git a/.github/workflows/run_fvt_tests.yaml b/.github/workflows/run_fvt_tests.yaml index 2f5b6f5ac..0bcdee93a 100644 --- a/.github/workflows/run_fvt_tests.yaml +++ b/.github/workflows/run_fvt_tests.yaml @@ -50,7 +50,7 @@ jobs: os: - ["debian11", "debian:11-slim"] builder: - - 5.1-1 + - 5.1-3 otp: - 25.3.2-1 elixir: @@ -123,7 +123,7 @@ jobs: os: - ["debian11", "debian:11-slim"] builder: - - 5.1-1 + - 5.1-3 otp: - 25.3.2-1 elixir: diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index 1fcbc5d35..d43079d61 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -34,12 +34,12 @@ jobs: MATRIX="$(echo "${APPS}" | jq -c ' [ (.[] | select(.profile == "emqx") | . + { - builder: "5.1-1", + builder: "5.1-3", otp: "25.3.2-1", elixir: "1.14.5" }), (.[] | select(.profile == "emqx-enterprise") | . + { - builder: "5.1-1", + builder: "5.1-3", otp: ["25.3.2-1"][], elixir: "1.14.5" }) From cfcdc094b55bdd0f9d95248fc9bdf97a1fa01468 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Fri, 14 Jul 2023 14:00:54 +0200 Subject: [PATCH 33/73] ci: always upload artefacts on build_packages --- .github/workflows/build_packages.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 7985818da..1a76f7ad7 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -263,7 +263,6 @@ jobs: --builder "force_host" done - uses: actions/upload-artifact@v3 - if: success() with: name: ${{ matrix.profile }} path: _packages/${{ matrix.profile }}/ From 2b5bd9f1830993dda3fd6ea5129dd5fb9c5401fe Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Mon, 17 Jul 2023 12:23:54 +0200 Subject: [PATCH 34/73] ci: separate steps to build and test packages --- .github/workflows/build_packages.yaml | 42 ++++++++++++++++----------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 1a76f7ad7..14b3e75ff 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -234,11 +234,7 @@ jobs: ref: ${{ github.event.inputs.branch_or_tag }} fetch-depth: 0 - - name: build emqx packages - env: - ELIXIR: ${{ matrix.elixir }} - PROFILE: ${{ matrix.profile }} - ARCH: ${{ matrix.arch }} + - name: fix workdir run: | set -eu git config --global --add safe.directory "$GITHUB_WORKSPACE" @@ -248,20 +244,32 @@ jobs: cd /emqx fi echo "pwd is $PWD" - PKGTYPES="tgz pkg" - IS_ELIXIR=${{ matrix.with_elixir }} + + - name: build emqx packages + env: + PROFILE: ${{ matrix.profile }} + IS_ELIXIR: ${{ matrix.with_elixir }} + ACLOCAL_PATH: "/usr/share/aclocal:/usr/local/share/aclocal" + run: | + set -eu if [ "${IS_ELIXIR:-}" == 'yes' ]; then - PKGTYPES="tgz" + make "${PROFILE}-elixir-tgz" + else + make "${PROFILE}-tgz" + make "${PROFILE}-pkg" + fi + - name: test emqx packages + env: + PROFILE: ${{ matrix.profile }} + IS_ELIXIR: ${{ matrix.with_elixir }} + run: | + set -eu + if [ "${IS_ELIXIR:-}" == 'yes' ]; then + ./scripts/pkg-tests.sh "${PROFILE}-elixir-tgz" + else + ./scripts/pkg-tests.sh "${PROFILE}-tgz" + ./scripts/pkg-tests.sh "${PROFILE}-pkg" fi - for PKGTYPE in ${PKGTYPES}; - do - ./scripts/buildx.sh \ - 
--profile "${PROFILE}" \ - --pkgtype "${PKGTYPE}" \ - --arch "${ARCH}" \ - --elixir "${IS_ELIXIR}" \ - --builder "force_host" - done - uses: actions/upload-artifact@v3 with: name: ${{ matrix.profile }} From 2594998fd035987189366169ee6245386d93a734 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Mon, 17 Jul 2023 13:58:49 +0200 Subject: [PATCH 35/73] ci: self-hosted runners for build-packages --- .github/workflows/build_packages.yaml | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 14b3e75ff..bb6b46612 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -191,7 +191,7 @@ jobs: - amzn2023 build_machine: - aws-arm64 - - ubuntu-22.04 + - aws-amd64 builder: - 5.1-3 elixir: @@ -200,7 +200,7 @@ jobs: - 'no' exclude: - arch: arm64 - build_machine: ubuntu-22.04 + build_machine: aws-amd64 - arch: amd64 build_machine: aws-arm64 include: @@ -208,15 +208,7 @@ jobs: otp: 25.3.2-1 arch: amd64 os: ubuntu22.04 - build_machine: ubuntu-22.04 - builder: 5.1-3 - elixir: 1.14.5 - with_elixir: 'yes' - - profile: emqx - otp: 25.3.2-1 - arch: amd64 - os: amzn2 - build_machine: ubuntu-22.04 + build_machine: aws-amd64 builder: 5.1-3 elixir: 1.14.5 with_elixir: 'yes' @@ -227,7 +219,6 @@ jobs: steps: - uses: AutoModality/action-clean@v1 - if: matrix.build_machine == 'aws-arm64' - uses: actions/checkout@v3 with: From 42627a3d426bd1dc6b57484773631afba7e0913a Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Tue, 18 Jul 2023 11:51:39 +0200 Subject: [PATCH 36/73] fix: ps -ef to detect defunct pid --- bin/emqx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/emqx b/bin/emqx index 802e4b1dd..6c4ed376f 100755 --- a/bin/emqx +++ b/bin/emqx @@ -811,8 +811,8 @@ is_down() { PID="$1" if ps -p "$PID" >/dev/null; then # still around - # shellcheck disable=SC2009 # this grep pattern is not a part of the progra names - if ps -p "$PID" | $GREP -q 'defunct'; then + # shellcheck disable=SC2009 # this grep pattern is not a part of the program names + if ps -efp "$PID" | $GREP -q 'defunct'; then # zombie state, print parent pid parent="$(ps -o ppid= -p "$PID" | tr -d ' ')" logwarn "$PID is marked , parent: $(ps -p "$parent")" From 8293fce7f278cad6f696a44b98f8136358f5a9f4 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Thu, 13 Jul 2023 14:20:24 +0200 Subject: [PATCH 37/73] chore: fix pkg-tests.sh --- scripts/pkg-tests.sh | 79 +++++++++++++++++++++++--------------------- 1 file changed, 41 insertions(+), 38 deletions(-) diff --git a/scripts/pkg-tests.sh b/scripts/pkg-tests.sh index c17c47ad2..e6ab05d11 100755 --- a/scripts/pkg-tests.sh +++ b/scripts/pkg-tests.sh @@ -46,6 +46,7 @@ export SCRIPTS="${CODE_PATH}/scripts" export EMQX_NAME export PACKAGE_PATH="${CODE_PATH}/_packages/${EMQX_NAME}" export RELUP_PACKAGE_PATH="${CODE_PATH}/_upgrade_base" +export PAHO_MQTT_TESTING_PATH="${PAHO_MQTT_TESTING_PATH:-/paho-mqtt-testing}" SYSTEM="$("$SCRIPTS"/get-distro.sh)" @@ -64,7 +65,7 @@ fi PACKAGE_VERSION="$("$CODE_PATH"/pkg-vsn.sh "${EMQX_NAME}")" PACKAGE_VERSION_LONG="$("$CODE_PATH"/pkg-vsn.sh "${EMQX_NAME}" --long --elixir "${IS_ELIXIR}")" PACKAGE_NAME="${EMQX_NAME}-${PACKAGE_VERSION_LONG}" -PACKAGE_FILE_NAME="${PACKAGE_NAME}.${PKG_SUFFIX}" +PACKAGE_FILE_NAME="${PACKAGE_FILE_NAME:-${PACKAGE_NAME}.${PKG_SUFFIX}}" PACKAGE_FILE="${PACKAGE_PATH}/${PACKAGE_FILE_NAME}" if ! 
[ -f "$PACKAGE_FILE" ]; then @@ -75,9 +76,21 @@ fi emqx_prepare(){ mkdir -p "${PACKAGE_PATH}" - if [ ! -d "/paho-mqtt-testing" ]; then - git clone -b develop-4.0 https://github.com/emqx/paho.mqtt.testing.git /paho-mqtt-testing + if [ ! -d "${PAHO_MQTT_TESTING_PATH}" ]; then + git clone -b develop-4.0 https://github.com/emqx/paho.mqtt.testing.git "${PAHO_MQTT_TESTING_PATH}" fi + # Debian 12 complains if we don't use venv + case "${SYSTEM:-}" in + debian12) + apt-get update -y && apt-get install -y virtualenv + virtualenv venv + # https://www.shellcheck.net/wiki/SC1091 + # shellcheck source=/dev/null + source ./venv/bin/activate + ;; + *) + ;; + esac pip3 install pytest } @@ -97,36 +110,22 @@ emqx_test(){ # fi # sed -i '/emqx_telemetry/d' "${PACKAGE_PATH}"/emqx/data/loaded_plugins - echo "running ${packagename} start" - if ! "${PACKAGE_PATH}"/emqx/bin/emqx start; then - cat "${PACKAGE_PATH}"/emqx/log/erlang.log.1 || true - cat "${PACKAGE_PATH}"/emqx/log/emqx.log.1 || true - exit 1 - fi - "$SCRIPTS/test/emqx-smoke-test.sh" 127.0.0.1 18083 - pytest -v /paho-mqtt-testing/interoperability/test_client/V5/test_connect.py::test_basic - if ! "${PACKAGE_PATH}"/emqx/bin/emqx stop; then - cat "${PACKAGE_PATH}"/emqx/log/erlang.log.1 || true - cat "${PACKAGE_PATH}"/emqx/log/emqx.log.1 || true - exit 1 - fi - echo "running ${packagename} stop" + run_test "${PACKAGE_PATH}/emqx/bin" "${PACKAGE_PATH}/emqx/log" "${PACKAGE_PATH}/emqx/releases/emqx_vars" + rm -rf "${PACKAGE_PATH}"/emqx ;; "deb") dpkg -i "${PACKAGE_PATH}/${packagename}" - if [ "$(dpkg -l |grep emqx |awk '{print $1}')" != "ii" ] + if [ "$(dpkg -l | grep ${EMQX_NAME} | awk '{print $1}')" != "ii" ] then echo "package install error" exit 1 fi - echo "running ${packagename} start" - run_test - echo "running ${packagename} stop" + run_test "/usr/bin" "/var/log/emqx" "$(dpkg -L ${EMQX_NAME} | grep emqx_vars)" dpkg -r "${EMQX_NAME}" - if [ "$(dpkg -l |grep emqx |awk '{print $1}')" != "rc" ] + if [ "$(dpkg -l | grep ${EMQX_NAME} | awk '{print $1}')" != "rc" ] then echo "package remove error" exit 1 @@ -146,6 +145,10 @@ emqx_test(){ # el8 is fine with python3 true ;; + "el9") + # el9 is fine with python3 + true + ;; *) alternatives --list | grep python && alternatives --set python /usr/bin/python2 ;; @@ -161,12 +164,10 @@ emqx_test(){ exit 1 fi - echo "running ${packagename} start" - run_test - echo "running ${packagename} stop" + run_test "/usr/bin" "/var/log/emqx" "$(rpm -ql ${EMQX_NAME} | grep emqx_vars)" rpm -e "${EMQX_NAME}" - if [ "$(rpm -q emqx)" != "package emqx is not installed" ];then + if [ "$(rpm -q ${EMQX_NAME})" != "package ${EMQX_NAME} is not installed" ];then echo "package uninstall error" exit 1 fi @@ -175,8 +176,10 @@ emqx_test(){ } run_test(){ + local bin_dir="$1" + local log_dir="$2" + local emqx_env_vars="$3" # sed -i '/emqx_telemetry/d' /var/lib/emqx/loaded_plugins - emqx_env_vars=$(dirname "$(readlink "$(command -v emqx)")")/../releases/emqx_vars if [ -f "$emqx_env_vars" ]; then @@ -194,21 +197,21 @@ EOF echo "Error: cannot locate emqx_vars" exit 1 fi - if ! emqx 'start'; then - cat /var/log/emqx/erlang.log.1 || true - cat /var/log/emqx/emqx.log.1 || true + echo "running ${packagename} start" + if ! 
"${bin_dir}/emqx" 'start'; then + echo "ERROR: failed_to_start_emqx" + cat "${log_dir}/erlang.log.1" || true + cat "${log_dir}/emqx.log.1" || true exit 1 fi "$SCRIPTS/test/emqx-smoke-test.sh" 127.0.0.1 18083 - pytest -v /paho-mqtt-testing/interoperability/test_client/V5/test_connect.py::test_basic - # shellcheck disable=SC2009 # pgrep does not support Extended Regular Expressions - ps -ef | grep -E '\-progname\s.+emqx\s' - if ! emqx 'stop'; then - # shellcheck disable=SC2009 # pgrep does not support Extended Regular Expressions - ps -ef | grep -E '\-progname\s.+emqx\s' + pytest -v "${PAHO_MQTT_TESTING_PATH}"/interoperability/test_client/V5/test_connect.py::test_basic + "${bin_dir}/emqx" ping + echo "running ${packagename} stop" + if ! "${bin_dir}/emqx" 'stop'; then echo "ERROR: failed_to_stop_emqx_with_the_stop_command" - cat /var/log/emqx/erlang.log.1 || true - cat /var/log/emqx/emqx.log.1 || true + cat "${log_dir}/erlang.log.1" || true + cat "${log_dir}/emqx.log.1" || true exit 1 fi } From 73c4bcbc360d23bfbab6dc1be08ea02a564b0331 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Tue, 18 Jul 2023 12:23:59 +0200 Subject: [PATCH 38/73] chore: add changelog --- changes/ce/feat-11289.en.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/ce/feat-11289.en.md diff --git a/changes/ce/feat-11289.en.md b/changes/ce/feat-11289.en.md new file mode 100644 index 000000000..4514b153d --- /dev/null +++ b/changes/ce/feat-11289.en.md @@ -0,0 +1 @@ +Release packages for Debian 12. From c9985758d970bf7d3408317cb382a84966b3356e Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Tue, 18 Jul 2023 20:35:21 +0300 Subject: [PATCH 39/73] fix(emqx_rule_engine): fix typo in `behaviour` module attribute The type resulted in missing `rule_engine` config after importing data from a backup file. Fixes: EMQX-10590 --- apps/emqx_rule_engine/src/emqx_rule_engine.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine.erl b/apps/emqx_rule_engine/src/emqx_rule_engine.erl index f0640c7dc..66c82d3a1 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_engine.erl @@ -18,7 +18,7 @@ -behaviour(gen_server). -behaviour(emqx_config_handler). --behaiour(emqx_config_backup). +-behaviour(emqx_config_backup). -include("rule_engine.hrl"). -include_lib("emqx/include/logger.hrl"). From 2954ff7300d1471157d38068617304e305c4fc59 Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Tue, 18 Jul 2023 20:39:46 +0300 Subject: [PATCH 40/73] feat: add `topic_metrics` and `slow_subs` configuration to data import/export Fixes: EMQX-10590 --- .../src/emqx_mgmt_data_backup.erl | 3 +- apps/emqx_modules/src/emqx_modules.app.src | 2 +- apps/emqx_modules/src/emqx_modules_conf.erl | 41 +++++++++++++++++- .../test/emqx_modules_conf_SUITE.erl | 42 +++++++++++++++++-- changes/ce/fix-11296.en.md | 4 ++ 5 files changed, 84 insertions(+), 8 deletions(-) create mode 100644 changes/ce/fix-11296.en.md diff --git a/apps/emqx_management/src/emqx_mgmt_data_backup.erl b/apps/emqx_management/src/emqx_mgmt_data_backup.erl index bdb9cf666..b83a46903 100644 --- a/apps/emqx_management/src/emqx_mgmt_data_backup.erl +++ b/apps/emqx_management/src/emqx_mgmt_data_backup.erl @@ -57,7 +57,8 @@ <<"flapping_detect">>, <<"broker">>, <<"force_gc">>, - <<"zones">> + <<"zones">>, + <<"slow_subs">> ]). -define(DEFAULT_OPTS, #{}). 
diff --git a/apps/emqx_modules/src/emqx_modules.app.src b/apps/emqx_modules/src/emqx_modules.app.src index 1b934e015..4de1c2e9b 100644 --- a/apps/emqx_modules/src/emqx_modules.app.src +++ b/apps/emqx_modules/src/emqx_modules.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_modules, [ {description, "EMQX Modules"}, - {vsn, "5.0.17"}, + {vsn, "5.0.18"}, {modules, []}, {applications, [kernel, stdlib, emqx, emqx_ctl]}, {mod, {emqx_modules_app, []}}, diff --git a/apps/emqx_modules/src/emqx_modules_conf.erl b/apps/emqx_modules/src/emqx_modules_conf.erl index 69a69cb12..e5604280d 100644 --- a/apps/emqx_modules/src/emqx_modules_conf.erl +++ b/apps/emqx_modules/src/emqx_modules_conf.erl @@ -18,6 +18,7 @@ -module(emqx_modules_conf). -behaviour(emqx_config_handler). +-behaviour(emqx_config_backup). %% Load/Unload -export([ @@ -37,6 +38,11 @@ post_config_update/5 ]). +%% Data backup +-export([ + import_config/1 +]). + %%-------------------------------------------------------------------- %% Load/Unload %%-------------------------------------------------------------------- @@ -78,6 +84,20 @@ remove_topic_metrics(Topic) -> {error, Reason} -> {error, Reason} end. +%%-------------------------------------------------------------------- +%% Data backup (Topic-Metrics) +%%-------------------------------------------------------------------- + +import_config(#{<<"topic_metrics">> := Topics}) -> + case emqx_conf:update([topic_metrics], {merge_topics, Topics}, #{override_to => cluster}) of + {ok, _} -> + {ok, #{root_key => topic_metrics, changed => []}}; + Error -> + {error, #{root_key => topic_metrics, reason => Error}} + end; +import_config(_RawConf) -> + {ok, #{root_key => topic_metrics, changed => []}}. + %%-------------------------------------------------------------------- %% Config Handler %%-------------------------------------------------------------------- @@ -103,7 +123,13 @@ pre_config_update(_, {remove_topic_metrics, Topic0}, RawConf) -> {ok, RawConf -- [Topic]}; _ -> {error, not_found} - end. + end; +pre_config_update(_, {merge_topics, NewConf}, OldConf) -> + KeyFun = fun(#{<<"topic">> := T}) -> T end, + MergedConf = emqx_utils:merge_lists(OldConf, NewConf, KeyFun), + {ok, MergedConf}; +pre_config_update(_, NewConf, _OldConf) -> + {ok, NewConf}. -spec post_config_update( list(atom()), @@ -113,7 +139,6 @@ pre_config_update(_, {remove_topic_metrics, Topic0}, RawConf) -> emqx_config:app_envs() ) -> ok | {ok, Result :: any()} | {error, Reason :: term()}. - post_config_update( _, {add_topic_metrics, Topic}, @@ -135,6 +160,18 @@ post_config_update( case emqx_topic_metrics:deregister(Topic) of ok -> ok; {error, Reason} -> {error, Reason} + end; +post_config_update(_, _UpdateReq, NewConfig, OldConfig, _AppEnvs) -> + #{ + removed := Removed, + added := Added + } = emqx_utils:diff_lists(NewConfig, OldConfig, fun(#{topic := T}) -> T end), + Deregistered = [emqx_topic_metrics:deregister(T) || #{topic := T} <- Removed], + Registered = [emqx_topic_metrics:register(T) || #{topic := T} <- Added], + Errs = [Res || Res <- Registered ++ Deregistered, Res =/= ok], + case Errs of + [] -> ok; + _ -> {error, Errs} end. 
%%-------------------------------------------------------------------- diff --git a/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl b/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl index 14e477bf9..b95cc2fe3 100644 --- a/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl +++ b/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl @@ -39,12 +39,46 @@ end_per_suite(_Conf) -> init_per_testcase(_CaseName, Conf) -> Conf. +end_per_testcase(_CaseName, _Conf) -> + [emqx_modules_conf:remove_topic_metrics(T) || T <- emqx_modules_conf:topic_metrics()], + ok. + %%-------------------------------------------------------------------- %% Cases %%-------------------------------------------------------------------- -t_topic_metrics_list(_) -> - ok. - t_topic_metrics_add_remove(_) -> - ok. + ?assertEqual([], emqx_modules_conf:topic_metrics()), + ?assertMatch({ok, _}, emqx_modules_conf:add_topic_metrics(<<"test-topic">>)), + ?assertEqual([<<"test-topic">>], emqx_modules_conf:topic_metrics()), + ?assertEqual(ok, emqx_modules_conf:remove_topic_metrics(<<"test-topic">>)), + ?assertEqual([], emqx_modules_conf:topic_metrics()), + ?assertMatch({error, _}, emqx_modules_conf:remove_topic_metrics(<<"test-topic">>)). + +t_topic_metrics_merge_update(_) -> + ?assertEqual([], emqx_modules_conf:topic_metrics()), + ?assertMatch({ok, _}, emqx_modules_conf:add_topic_metrics(<<"test-topic-before-import1">>)), + ?assertMatch({ok, _}, emqx_modules_conf:add_topic_metrics(<<"test-topic-before-import2">>)), + ImportConf = #{ + <<"topic_metrics">> => + [ + #{<<"topic">> => <<"imported_topic1">>}, + #{<<"topic">> => <<"imported_topic2">>} + ] + }, + ?assertMatch({ok, _}, emqx_modules_conf:import_config(ImportConf)), + ExpTopics = [ + <<"test-topic-before-import1">>, + <<"test-topic-before-import2">>, + <<"imported_topic1">>, + <<"imported_topic2">> + ], + ?assertEqual(ExpTopics, emqx_modules_conf:topic_metrics()). + +t_topic_metrics_update(_) -> + ?assertEqual([], emqx_modules_conf:topic_metrics()), + ?assertMatch({ok, _}, emqx_modules_conf:add_topic_metrics(<<"test-topic-before-update1">>)), + ?assertMatch({ok, _}, emqx_modules_conf:add_topic_metrics(<<"test-topic-before-update2">>)), + UpdConf = [#{<<"topic">> => <<"new_topic1">>}, #{<<"topic">> => <<"new_topic2">>}], + ?assertMatch({ok, _}, emqx_conf:update([topic_metrics], UpdConf, #{override_to => cluster})), + ?assertEqual([<<"new_topic1">>, <<"new_topic2">>], emqx_modules_conf:topic_metrics()). diff --git a/changes/ce/fix-11296.en.md b/changes/ce/fix-11296.en.md new file mode 100644 index 000000000..fca72c01a --- /dev/null +++ b/changes/ce/fix-11296.en.md @@ -0,0 +1,4 @@ +Import additional configurations from EMQX backup file (`emqx ctl import` command): + - rule_engine (previously not imported due to the bug) + - topic_metrics (previously not implemented) + - slow_subs (previously not implemented). From 0cd23511341de3819835ff060bf40958ed2fe7c3 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Wed, 19 Jul 2023 14:11:10 +0200 Subject: [PATCH 41/73] refactor(session): hide `no_local` logic behind enqueue / deliver This is a part of effort to minimize `emqx_session` module interface to simplify adding alternative session implementations. 
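For context: "No Local" is the MQTT v5 subscription option (`nl`) that forbids delivering a message back to the client that published it. Previously this was enforced by a separate `emqx_session:ignore_local/4` pass that every caller had to remember to apply; with this change `enrich_deliver/3` makes the decision itself and returns `{drop, Msg, no_local}`, which `deliver` and `enqueue` then turn into the usual drop hook and counters. The standalone sketch below is illustrative only (its module and function names are not part of the patch) and simply restates the rule being moved:

%% Illustrative sketch only; not part of this patch.
-module(no_local_example).
-export([classify/3]).

%% SubOpts   : subscription options for the topic, e.g. #{nl => 1, qos => 1}
%% Publisher : client id that published the message
%% Owner     : client id that owns the subscribing session
classify(#{nl := 1}, Publisher, Owner) when Publisher =:= Owner ->
    %% No Local is set and the subscriber is the publisher: drop the delivery
    %% (the session counts this as 'delivery.dropped.no_local').
    drop;
classify(_SubOpts, _Publisher, _Owner) ->
    deliver.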
--- apps/emqx/src/emqx_channel.erl | 24 +++------- apps/emqx/src/emqx_session.erl | 47 +++++++++---------- .../emqx_persistent_session.erl | 16 +++---- apps/emqx/test/emqx_channel_SUITE.erl | 13 +++-- .../src/emqx_eviction_agent_channel.erl | 21 ++------- 5 files changed, 47 insertions(+), 74 deletions(-) diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index d879e5a2d..9f47c8f81 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -924,18 +924,13 @@ handle_deliver( Delivers, Channel = #channel{ takeover = true, - pendings = Pendings, - session = Session, - clientinfo = #{clientid := ClientId} = ClientInfo + pendings = Pendings } ) -> %% NOTE: Order is important here. While the takeover is in %% progress, the session cannot enqueue messages, since it already %% passed on the queue to the new connection in the session state. - NPendings = lists:append( - Pendings, - emqx_session:ignore_local(ClientInfo, maybe_nack(Delivers), ClientId, Session) - ), + NPendings = lists:append(Pendings, maybe_nack(Delivers)), {ok, Channel#channel{pendings = NPendings}}; handle_deliver( Delivers, @@ -943,12 +938,11 @@ handle_deliver( conn_state = disconnected, takeover = false, session = Session, - clientinfo = #{clientid := ClientId} = ClientInfo + clientinfo = ClientInfo } ) -> Delivers1 = maybe_nack(Delivers), - Delivers2 = emqx_session:ignore_local(ClientInfo, Delivers1, ClientId, Session), - NSession = emqx_session:enqueue(ClientInfo, Delivers2, Session), + NSession = emqx_session:enqueue(ClientInfo, Delivers1, Session), NChannel = Channel#channel{session = NSession}, {ok, NChannel}; handle_deliver( @@ -956,16 +950,10 @@ handle_deliver( Channel = #channel{ session = Session, takeover = false, - clientinfo = #{clientid := ClientId} = ClientInfo + clientinfo = ClientInfo } ) -> - case - emqx_session:deliver( - ClientInfo, - emqx_session:ignore_local(ClientInfo, Delivers, ClientId, Session), - Session - ) - of + case emqx_session:deliver(ClientInfo, Delivers, Session) of {ok, Publishes, NSession} -> NChannel = Channel#channel{session = NSession}, handle_out(publish, Publishes, ensure_timer(retry_timer, NChannel)); diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index db0059709..b15fcd2ed 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -309,27 +309,6 @@ info(created_at, #session{created_at = CreatedAt}) -> -spec stats(session()) -> emqx_types:stats(). stats(Session) -> info(?STATS_KEYS, Session). -%%-------------------------------------------------------------------- -%% Ignore local messages -%%-------------------------------------------------------------------- - -ignore_local(ClientInfo, Delivers, Subscriber, Session) -> - Subs = info(subscriptions, Session), - lists:filter( - fun({deliver, Topic, #message{from = Publisher} = Msg}) -> - case maps:find(Topic, Subs) of - {ok, #{nl := 1}} when Subscriber =:= Publisher -> - ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, no_local]), - ok = emqx_metrics:inc('delivery.dropped'), - ok = emqx_metrics:inc('delivery.dropped.no_local'), - false; - _ -> - true - end - end, - Delivers - ). 
- %%-------------------------------------------------------------------- %% Client -> Broker: SUBSCRIBE %%-------------------------------------------------------------------- @@ -610,7 +589,10 @@ deliver_msg( MarkedMsg = mark_begin_deliver(Msg), Inflight1 = emqx_inflight:insert(PacketId, with_ts(MarkedMsg), Inflight), {ok, [Publish], next_pkt_id(Session#session{inflight = Inflight1})} - end. + end; +deliver_msg(ClientInfo, {drop, Msg, Reason}, Session) -> + handle_dropped(ClientInfo, Msg, Reason, Session), + {ok, Session}. -spec enqueue( emqx_types:clientinfo(), @@ -629,7 +611,10 @@ enqueue(ClientInfo, Delivers, Session) when is_list(Delivers) -> enqueue(ClientInfo, #message{} = Msg, Session = #session{mqueue = Q}) -> {Dropped, NewQ} = emqx_mqueue:in(Msg, Q), (Dropped =/= undefined) andalso handle_dropped(ClientInfo, Dropped, Session), - Session#session{mqueue = NewQ}. + Session#session{mqueue = NewQ}; +enqueue(ClientInfo, {drop, Msg, Reason}, Session) -> + handle_dropped(ClientInfo, Msg, Reason, Session), + Session. handle_dropped(ClientInfo, Msg = #message{qos = QoS, topic = Topic}, #session{mqueue = Q}) -> Payload = emqx_message:to_log_map(Msg), @@ -666,8 +651,18 @@ handle_dropped(ClientInfo, Msg = #message{qos = QoS, topic = Topic}, #session{mq ) end. +handle_dropped(ClientInfo, Msg, Reason, _Session) -> + ok = emqx_hooks:run('delivery.dropped', [ClientInfo, Msg, Reason]), + ok = emqx_metrics:inc('delivery.dropped'), + ok = emqx_metrics:inc('delivery.dropped.no_local'). + enrich_deliver({deliver, Topic, Msg}, Session = #session{subscriptions = Subs}) -> - enrich_subopts(get_subopts(Topic, Subs), Msg, Session). + enrich_deliver(Msg, maps:find(Topic, Subs), Session). + +enrich_deliver(Msg = #message{from = ClientId}, {ok, #{nl := 1}}, #session{clientid = ClientId}) -> + {drop, Msg, no_local}; +enrich_deliver(Msg, SubOpts, Session) -> + enrich_subopts(mk_subopts(SubOpts), Msg, Session). maybe_ack(Msg) -> emqx_shared_sub:maybe_ack(Msg). @@ -675,8 +670,8 @@ maybe_ack(Msg) -> maybe_nack(Msg) -> emqx_shared_sub:maybe_nack_dropped(Msg). -get_subopts(Topic, SubMap) -> - case maps:find(Topic, SubMap) of +mk_subopts(SubOpts) -> + case SubOpts of {ok, #{nl := Nl, qos := QoS, rap := Rap, subid := SubId}} -> [{nl, Nl}, {qos, QoS}, {rap, Rap}, {subid, SubId}]; {ok, #{nl := Nl, qos := QoS, rap := Rap}} -> diff --git a/apps/emqx/src/persistent_session/emqx_persistent_session.erl b/apps/emqx/src/persistent_session/emqx_persistent_session.erl index bfda233e1..111154571 100644 --- a/apps/emqx/src/persistent_session/emqx_persistent_session.erl +++ b/apps/emqx/src/persistent_session/emqx_persistent_session.erl @@ -272,7 +272,7 @@ remove_subscription(_TopicFilter, _SessionID, false = _IsPersistent) -> %% Must be called inside a emqx_cm_locker transaction. -spec resume(emqx_types:clientinfo(), emqx_types:conninfo(), emqx_session:session()) -> {emqx_session:session(), [emqx_types:deliver()]}. -resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> +resume(ClientInfo, ConnInfo, Session) -> SessionID = emqx_session:info(id, Session), ?tp(ps_resuming, #{from => db, sid => SessionID}), @@ -281,7 +281,6 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> %% 1. Get pending messages from DB. 
?tp(ps_initial_pendings, #{sid => SessionID}), Pendings1 = pending(SessionID), - Pendings2 = emqx_session:ignore_local(ClientInfo, Pendings1, ClientID, Session), ?tp(ps_got_initial_pendings, #{ sid => SessionID, msgs => Pendings1 @@ -290,11 +289,11 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> %% 2. Enqueue messages to mimic that the process was alive %% when the messages were delivered. ?tp(ps_persist_pendings, #{sid => SessionID}), - Session1 = emqx_session:enqueue(ClientInfo, Pendings2, Session), + Session1 = emqx_session:enqueue(ClientInfo, Pendings1, Session), Session2 = persist(ClientInfo, ConnInfo, Session1), - mark_as_delivered(SessionID, Pendings2), + mark_as_delivered(SessionID, Pendings1), ?tp(ps_persist_pendings_msgs, #{ - msgs => Pendings2, + msgs => Pendings1, sid => SessionID }), @@ -312,11 +311,10 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> %% 5. Get pending messages from DB until we find all markers. ?tp(ps_marker_pendings, #{sid => SessionID}), MarkerIDs = [Marker || {_, Marker} <- NodeMarkers], - Pendings3 = pending(SessionID, MarkerIDs), - Pendings4 = emqx_session:ignore_local(ClientInfo, Pendings3, ClientID, Session), + Pendings2 = pending(SessionID, MarkerIDs), ?tp(ps_marker_pendings_msgs, #{ sid => SessionID, - msgs => Pendings4 + msgs => Pendings2 }), %% 6. Get pending messages from writers. @@ -329,7 +327,7 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> %% 7. Drain the inbox and usort the messages %% with the pending messages. (Should be done by caller.) - {Session2, Pendings4 ++ WriterPendings}. + {Session2, Pendings2 ++ WriterPendings}. resume_begin(Nodes, SessionID) -> Res = emqx_persistent_session_proto_v1:resume_begin(Nodes, self(), SessionID), diff --git a/apps/emqx/test/emqx_channel_SUITE.erl b/apps/emqx/test/emqx_channel_SUITE.erl index f266dbcfa..3cd0d411f 100644 --- a/apps/emqx/test/emqx_channel_SUITE.erl +++ b/apps/emqx/test/emqx_channel_SUITE.erl @@ -584,7 +584,7 @@ t_handle_deliver(_) -> t_handle_deliver_nl(_) -> ClientInfo = clientinfo(#{clientid => <<"clientid">>}), - Session = session(#{subscriptions => #{<<"t1">> => #{nl => 1}}}), + Session = session(ClientInfo, #{subscriptions => #{<<"t1">> => #{nl => 1}}}), Channel = channel(#{clientinfo => ClientInfo, session => Session}), Msg = emqx_message:make(<<"clientid">>, ?QOS_1, <<"t1">>, <<"qos1">>), NMsg = emqx_message:set_flag(nl, Msg), @@ -1071,11 +1071,14 @@ connpkt(Props) -> password = <<"passwd">> }. -session() -> session(#{}). -session(InitFields) when is_map(InitFields) -> +session() -> session(#{zone => default, clientid => <<"fake-test">>}, #{}). +session(InitFields) -> session(#{zone => default, clientid => <<"fake-test">>}, InitFields). 
+session(ClientInfo, InitFields) when is_map(InitFields) -> Conf = emqx_cm:get_session_confs( - #{zone => default, clientid => <<"fake-test">>}, #{ - receive_maximum => 0, expiry_interval => 0 + ClientInfo, + #{ + receive_maximum => 0, + expiry_interval => 0 } ), Session = emqx_session:init(Conf), diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl index 7d0bc7528..f6ad11167 100644 --- a/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent_channel.erl @@ -180,30 +180,24 @@ handle_deliver( Delivers, #{ takeover := true, - pendings := Pendings, - session := Session, - clientinfo := #{clientid := ClientId} = ClientInfo + pendings := Pendings } = Channel ) -> %% NOTE: Order is important here. While the takeover is in %% progress, the session cannot enqueue messages, since it already %% passed on the queue to the new connection in the session state. - NPendings = lists:append( - Pendings, - emqx_session:ignore_local(ClientInfo, emqx_channel:maybe_nack(Delivers), ClientId, Session) - ), + NPendings = lists:append(Pendings, emqx_channel:maybe_nack(Delivers)), Channel#{pendings => NPendings}; handle_deliver( Delivers, #{ takeover := false, session := Session, - clientinfo := #{clientid := ClientId} = ClientInfo + clientinfo := ClientInfo } = Channel ) -> Delivers1 = emqx_channel:maybe_nack(Delivers), - Delivers2 = emqx_session:ignore_local(ClientInfo, Delivers1, ClientId, Session), - NSession = emqx_session:enqueue(ClientInfo, Delivers2, Session), + NSession = emqx_session:enqueue(ClientInfo, Delivers1, Session), Channel#{session := NSession}. cancel_expiry_timer(#{expiry_timer := TRef}) when is_reference(TRef) -> @@ -248,12 +242,7 @@ open_session(ConnInfo, #{clientid := ClientId} = ClientInfo) -> Pendings1 = lists:usort(lists:append(Pendings0, emqx_utils:drain_deliver())), NSession = emqx_session:enqueue( ClientInfo, - emqx_session:ignore_local( - ClientInfo, - emqx_channel:maybe_nack(Pendings1), - ClientId, - Session - ), + emqx_channel:maybe_nack(Pendings1), Session ), NChannel = Channel#{session => NSession}, From dd31487b4a9e5a74a98a79393870037b6c4c4865 Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Wed, 19 Jul 2023 14:12:56 +0200 Subject: [PATCH 42/73] refactor(session): drop `is_session/1` helper as useless This is a part of effort to minimize `emqx_session` module interface to simplify adding alternative session implementations. --- apps/emqx/src/emqx_session.erl | 5 ----- 1 file changed, 5 deletions(-) diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index b15fcd2ed..3036887de 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -64,7 +64,6 @@ -export([ info/1, info/2, - is_session/1, stats/1, obtain_next_pkt_id/1, get_mqueue/1 @@ -88,7 +87,6 @@ enqueue/3, dequeue/2, filter_queue/2, - ignore_local/4, retry/2, terminate/3 ]). @@ -252,9 +250,6 @@ unpersist(Session) -> %% Info, Stats %%-------------------------------------------------------------------- -is_session(#session{}) -> true; -is_session(_) -> false. - %% @doc Get infos of the session. -spec info(session()) -> emqx_types:infos(). 
info(Session) -> From ff4119e8b3b00fe57594ab900dbc9a8b9585411f Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Wed, 19 Jul 2023 16:57:09 +0300 Subject: [PATCH 43/73] chore(emqx_config): fix typo in the log message --- apps/emqx/src/emqx_config.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx/src/emqx_config.erl b/apps/emqx/src/emqx_config.erl index 58f53b134..450f3e1b0 100644 --- a/apps/emqx/src/emqx_config.erl +++ b/apps/emqx/src/emqx_config.erl @@ -641,7 +641,7 @@ backup_and_write(Path, Content) -> ?SLOG(error, #{ msg => "failed_to_save_conf_file", hint => - "The updated cluster config is note saved on this node, please check the file system.", + "The updated cluster config is not saved on this node, please check the file system.", filename => TmpFile, reason => Reason }), From e1e4c64a30585adfce25aa00aacaed1dfd3e6a2d Mon Sep 17 00:00:00 2001 From: Andrew Mayorov Date: Thu, 20 Jul 2023 21:19:08 +0200 Subject: [PATCH 44/73] refactor(session): allow peeking at mqueue less intrusively --- apps/emqx/src/emqx_channel.erl | 9 +++------ apps/emqx/src/emqx_connection.erl | 10 ++++++---- apps/emqx/src/emqx_session.erl | 6 +----- apps/emqx/test/emqx_shared_sub_SUITE.erl | 7 +++++-- 4 files changed, 15 insertions(+), 17 deletions(-) diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index 6e74126ca..af4d7be56 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -151,7 +151,7 @@ info(Channel) -> maps:from_list(info(?INFO_KEYS, Channel)). --spec info(list(atom()) | atom(), channel()) -> term(). +-spec info(list(atom()) | atom() | tuple(), channel()) -> term(). info(Keys, Channel) when is_list(Keys) -> [{Key, info(Key, Channel)} || Key <- Keys]; info(conninfo, #channel{conninfo = ConnInfo}) -> @@ -180,6 +180,8 @@ info(username, #channel{clientinfo = ClientInfo}) -> maps:get(username, ClientInfo, undefined); info(session, #channel{session = Session}) -> maybe_apply(fun emqx_session:info/1, Session); +info({session, Info}, #channel{session = Session}) -> + maybe_apply(fun(S) -> emqx_session:info(Info, S) end, Session); info(conn_state, #channel{conn_state = ConnState}) -> ConnState; info(keepalive, #channel{keepalive = Keepalive}) -> @@ -1195,8 +1197,6 @@ handle_call( ChanInfo1 = info(NChannel), emqx_cm:set_chan_info(ClientId, ChanInfo1#{sockinfo => SockInfo}), reply(ok, reset_timer(alive_timer, NChannel)); -handle_call(get_mqueue, Channel) -> - reply({ok, get_mqueue(Channel)}, Channel); handle_call(Req, Channel) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), reply(ignored, Channel). @@ -2240,6 +2240,3 @@ get_mqtt_conf(Zone, Key, Default) -> set_field(Name, Value, Channel) -> Pos = emqx_utils:index_of(Name, record_info(fields, channel)), setelement(Pos + 1, Channel, Value). - -get_mqueue(#channel{session = Session}) -> - emqx_session:get_mqueue(Session). diff --git a/apps/emqx/src/emqx_connection.erl b/apps/emqx/src/emqx_connection.erl index 1172460ac..70eb0d1e4 100644 --- a/apps/emqx/src/emqx_connection.erl +++ b/apps/emqx/src/emqx_connection.erl @@ -44,6 +44,7 @@ -export([ info/1, + info/2, stats/1 ]). @@ -221,11 +222,10 @@ info(CPid) when is_pid(CPid) -> call(CPid, info); info(State = #state{channel = Channel}) -> ChanInfo = emqx_channel:info(Channel), - SockInfo = maps:from_list( - info(?INFO_KEYS, State) - ), + SockInfo = maps:from_list(info(?INFO_KEYS, State)), ChanInfo#{sockinfo => SockInfo}. +-spec info([atom()] | atom() | tuple(), pid() | state()) -> term(). 
info(Keys, State) when is_list(Keys) -> [{Key, info(Key, State)} || Key <- Keys]; info(socktype, #state{transport = Transport, socket = Socket}) -> @@ -241,7 +241,9 @@ info(stats_timer, #state{stats_timer = StatsTimer}) -> info(limiter, #state{limiter = Limiter}) -> Limiter; info(limiter_timer, #state{limiter_timer = Timer}) -> - Timer. + Timer; +info({channel, Info}, #state{channel = Channel}) -> + emqx_channel:info(Info, Channel). %% @doc Get stats of the connection/channel. -spec stats(pid() | state()) -> emqx_types:stats(). diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index 3036887de..d838e95d0 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -65,8 +65,7 @@ info/1, info/2, stats/1, - obtain_next_pkt_id/1, - get_mqueue/1 + obtain_next_pkt_id/1 ]). -export([ @@ -955,6 +954,3 @@ age(Now, Ts) -> Now - Ts. set_field(Name, Value, Session) -> Pos = emqx_utils:index_of(Name, record_info(fields, session)), setelement(Pos + 1, Session, Value). - -get_mqueue(#session{mqueue = Q}) -> - emqx_mqueue:to_list(Q). diff --git a/apps/emqx/test/emqx_shared_sub_SUITE.erl b/apps/emqx/test/emqx_shared_sub_SUITE.erl index 4726f1111..6439981f6 100644 --- a/apps/emqx/test/emqx_shared_sub_SUITE.erl +++ b/apps/emqx/test/emqx_shared_sub_SUITE.erl @@ -758,13 +758,16 @@ t_qos1_random_dispatch_if_all_members_are_down(Config) when is_list(Config) -> {ok, _} = emqtt:publish(ConnPub, Topic, <<"hello11">>, 1), ct:sleep(100), - {ok, Msgs1} = gen_server:call(Pid1, get_mqueue), - {ok, Msgs2} = gen_server:call(Pid2, get_mqueue), + Msgs1 = emqx_mqueue:to_list(get_mqueue(Pid1)), + Msgs2 = emqx_mqueue:to_list(get_mqueue(Pid2)), %% assert the message is in mqueue (because socket is closed) ?assertMatch([#message{payload = <<"hello11">>}], Msgs1 ++ Msgs2), emqtt:stop(ConnPub), ok. +get_mqueue(ConnPid) -> + emqx_connection:info({channel, {session, mqueue}}, sys:get_state(ConnPid)). + %% No ack, QoS 2 subscriptions, %% client1 receives one message, send pubrec, then suspend %% client2 acts normal (auto_ack=true) From 3a3b843f4a781e65c183ccbe7f1f93514c20ee55 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Fri, 21 Jul 2023 13:16:54 +0200 Subject: [PATCH 45/73] fix(machine): add emqx_bridge_kinesis to reboot_lists.eterm --- apps/emqx_machine/priv/reboot_lists.eterm | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/emqx_machine/priv/reboot_lists.eterm b/apps/emqx_machine/priv/reboot_lists.eterm index 3b821c096..500a47d8f 100644 --- a/apps/emqx_machine/priv/reboot_lists.eterm +++ b/apps/emqx_machine/priv/reboot_lists.eterm @@ -97,6 +97,7 @@ emqx_bridge_tdengine, emqx_bridge_timescale, emqx_bridge_sqlserver, + emqx_bridge_kinesis, emqx_oracle, emqx_bridge_oracle, emqx_bridge_rabbitmq, From c2c3da073f3fb22e3cefa3d7a9f8917edc3c9f80 Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Fri, 21 Jul 2023 08:46:51 +0200 Subject: [PATCH 46/73] chore: v5.1.2 --- apps/emqx/include/emqx_release.hrl | 2 +- deploy/charts/emqx/Chart.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index 2412de99e..09b435e6e 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -32,7 +32,7 @@ %% `apps/emqx/src/bpapi/README.md' %% Opensource edition --define(EMQX_RELEASE_CE, "5.1.1"). +-define(EMQX_RELEASE_CE, "5.1.2"). %% Enterprise edition -define(EMQX_RELEASE_EE, "5.1.1-alpha.2"). 
diff --git a/deploy/charts/emqx/Chart.yaml b/deploy/charts/emqx/Chart.yaml index a2262da8b..67166708b 100644 --- a/deploy/charts/emqx/Chart.yaml +++ b/deploy/charts/emqx/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 5.1.1 +version: 5.1.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 5.1.1 +appVersion: 5.1.2 From 7df0c6a808ee9a546fd0bfddbdc1a5b5512fdd1b Mon Sep 17 00:00:00 2001 From: Ivan Dyachkov Date: Fri, 21 Jul 2023 13:26:55 +0200 Subject: [PATCH 47/73] docs: Generate changelog for v5.1.2 --- changes/v5.1.2.en.md | 72 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 changes/v5.1.2.en.md diff --git a/changes/v5.1.2.en.md b/changes/v5.1.2.en.md new file mode 100644 index 000000000..a6b7d127b --- /dev/null +++ b/changes/v5.1.2.en.md @@ -0,0 +1,72 @@ +# v5.1.2 + +## Enhancements + +- [#11124](https://github.com/emqx/emqx/pull/11124) Release packages for Amazon Linux 2023 + +- [#11226](https://github.com/emqx/emqx/pull/11226) Unify the listener switch to `enable`, while being compatible with the previous `enabled`. + +- [#11249](https://github.com/emqx/emqx/pull/11249) Support HTTP API for setting alarm watermark of license. + +- [#11251](https://github.com/emqx/emqx/pull/11251) Add `/cluster/topology` HTTP API endpoint + + `GET` request to the endpoint returns the cluster topology: connections between RLOG core and replicant nodes. + +- [#11253](https://github.com/emqx/emqx/pull/11253) The Webhook/HTTP bridge has been refactored to its own Erlang application. This allows for more flexibility in the future, and also allows for the bridge to be run as a standalone application. + +- [#11289](https://github.com/emqx/emqx/pull/11289) Release packages for Debian 12. + +- [#11290](https://github.com/emqx/emqx/pull/11290) Updated `jq` dependency to version 0.3.10 which includes `oniguruma` library update to version 6.9.8 with few minor security fixes. + +- [#11291](https://github.com/emqx/emqx/pull/11291) Updated RocksDB version to 1.8.0-emqx-1 via ekka update to 0.15.6. + +- [#11236](https://github.com/emqx/emqx/pull/11236) Improve the speed of clients querying in HTTP API `/clients` endpoint with default parameters + +## Bug Fixes + +- [#11065](https://github.com/emqx/emqx/pull/11065) Avoid logging irrelevant error messages during EMQX shutdown. + +- [#11077](https://github.com/emqx/emqx/pull/11077) Fixes crash when updating binding with a non-integer port. + +- [#11184](https://github.com/emqx/emqx/pull/11184) Config value for `max_packet_size` has a max value of 256MB defined by protocol. This is now enforced and any configuration with a value greater than that will break. + +- [#11192](https://github.com/emqx/emqx/pull/11192) Fix produces valid HOCON file when atom type is used. + Remove unnecessary `"` from HOCON file. + +- [#11195](https://github.com/emqx/emqx/pull/11195) Avoid to create duplicated subscription by HTTP API or client in Stomp gateway + +- [#11206](https://github.com/emqx/emqx/pull/11206) Make the username and password params of CoAP client to optional in connection mode. + +- [#11208](https://github.com/emqx/emqx/pull/11208) Fix the issue of abnormal data statistics for LwM2M client. 
+ +- [#11211](https://github.com/emqx/emqx/pull/11211) Consistently return `404` for `DELETE` operations on non-existent resources. + +- [#11214](https://github.com/emqx/emqx/pull/11214) Fix a bug where node configuration may fail to synchronize correctly when joining the cluster. + +- [#11229](https://github.com/emqx/emqx/pull/11229) Fixed an issue preventing plugins from starting/stopping after changing configuration via `emqx ctl conf load`. + +- [#11237](https://github.com/emqx/emqx/pull/11237) The `headers` default value in /prometheus API should be a map instead of a list. + +- [#11250](https://github.com/emqx/emqx/pull/11250) Fix while a WebSocket packet contains more than one MQTT packet, the order of MQTT packets will be reversed. + + +- [#11271](https://github.com/emqx/emqx/pull/11271) Ensure that the range of percentage type is from 0% to 100%. + +- [#11272](https://github.com/emqx/emqx/pull/11272) Fix a typo in the log, when EMQX received an abnormal `PUBREL` packet, the `pubrel` was mistakenly typo as `pubrec`. + +- [#11281](https://github.com/emqx/emqx/pull/11281) Restored support for the special `$queue/` shared subscription. + +- [#11294](https://github.com/emqx/emqx/pull/11294) Fix `emqx_ctl cluster join`, `leave`, and `status` commands. + +- [#11296](https://github.com/emqx/emqx/pull/11296) Import additional configurations from EMQX backup file (`emqx ctl import` command): + - rule_engine (previously not imported due to the bug) + - topic_metrics (previously not implemented) + - slow_subs (previously not implemented). + +- [#11309](https://github.com/emqx/emqx/pull/11309) Improve startup order of EMQX applications. + Simplify build scripts and improve code reuse. + +- [#11322](https://github.com/emqx/emqx/pull/11322) Import additional configurations from EMQX backup file (`emqx ctl import` command): + - rule_engine (previously not imported due to the bug) + - topic_metrics (previously not implemented) + - slow_subs (previously not implemented). From 485e495b9834d4d8b76de7a7a1db4d14770ea85e Mon Sep 17 00:00:00 2001 From: ieQu1 <99872536+ieQu1@users.noreply.github.com> Date: Fri, 21 Jul 2023 18:26:20 +0200 Subject: [PATCH 48/73] chore(ekka): Bump version to 0.15.8 (mria 0.5.10) --- apps/emqx/rebar.config | 2 +- changes/ce/fix-11327.en.md | 3 +++ mix.exs | 2 +- rebar.config | 2 +- 4 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 changes/ce/fix-11327.en.md diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 6962ec4cc..ea362dac8 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -28,7 +28,7 @@ {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.7"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.8"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.14"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, diff --git a/changes/ce/fix-11327.en.md b/changes/ce/fix-11327.en.md new file mode 100644 index 000000000..e6e35cc2c --- /dev/null +++ b/changes/ce/fix-11327.en.md @@ -0,0 +1,3 @@ +Update ekka to version 0.15.8, mria to version 0.15.8, and optvar to 1.0.5. 
+This fixes occasional assertion failures: +`{{badmatch,noproc},[{optvar,read,2,[{file,"optvar.erl"},{line,140}]},{optvar,read,1,[{file,"optvar.erl"},{line,124}]},...` diff --git a/mix.exs b/mix.exs index cf29a9c3e..c6b685893 100644 --- a/mix.exs +++ b/mix.exs @@ -55,7 +55,7 @@ defmodule EMQXUmbrella.MixProject do {:cowboy, github: "emqx/cowboy", tag: "2.9.2", override: true}, {:esockd, github: "emqx/esockd", tag: "5.9.6", override: true}, {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-1", override: true}, - {:ekka, github: "emqx/ekka", tag: "0.15.7", override: true}, + {:ekka, github: "emqx/ekka", tag: "0.15.8", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.8", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.11", override: true}, diff --git a/rebar.config b/rebar.config index 08d817771..b4fb4fb9e 100644 --- a/rebar.config +++ b/rebar.config @@ -62,7 +62,7 @@ , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}} , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}} , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-1"}}} - , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.7"}}} + , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.8"}}} , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.8"}}} , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.11"}}} From 4d581d0b3091d4a4dff2e4618ea29867d2201aff Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Tue, 9 May 2023 14:43:22 +0800 Subject: [PATCH 49/73] feat: init commit --- apps/emqx_bridge_greptimedb/.gitignore | 19 ++ apps/emqx_bridge_greptimedb/LICENSE | 191 ++++++++++++++++++ apps/emqx_bridge_greptimedb/README.md | 19 ++ apps/emqx_bridge_greptimedb/rebar.config | 13 ++ .../src/emqx_bridge_greptimedb.app.src | 14 ++ .../src/emqx_bridge_greptimedb.erl | 3 + 6 files changed, 259 insertions(+) create mode 100644 apps/emqx_bridge_greptimedb/.gitignore create mode 100644 apps/emqx_bridge_greptimedb/LICENSE create mode 100644 apps/emqx_bridge_greptimedb/README.md create mode 100644 apps/emqx_bridge_greptimedb/rebar.config create mode 100644 apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src create mode 100644 apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl diff --git a/apps/emqx_bridge_greptimedb/.gitignore b/apps/emqx_bridge_greptimedb/.gitignore new file mode 100644 index 000000000..45f82dfcd --- /dev/null +++ b/apps/emqx_bridge_greptimedb/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin + log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/apps/emqx_bridge_greptimedb/LICENSE b/apps/emqx_bridge_greptimedb/LICENSE new file mode 100644 index 000000000..64d3c22a9 --- /dev/null +++ b/apps/emqx_bridge_greptimedb/LICENSE @@ -0,0 +1,191 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2023, Dennis Zhuang . + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/apps/emqx_bridge_greptimedb/README.md b/apps/emqx_bridge_greptimedb/README.md new file mode 100644 index 000000000..13f26c348 --- /dev/null +++ b/apps/emqx_bridge_greptimedb/README.md @@ -0,0 +1,19 @@ +# emqx_bridge_greptimedb +This application houses the GreptimeDB data integration to EMQX. +It provides the means to connect to GreptimeDB and publish messages to it. + +It implements connection management and interaction without the need for a + separate connector app, since it's not used for authentication and authorization + applications. 
+ +## Docs + +For more information about GreptimeDB, please refer to [official + document](https://docs.greptime.com/). + +## Configurations + +TODO + +## License +[Apache License 2.0](./LICENSE) diff --git a/apps/emqx_bridge_greptimedb/rebar.config b/apps/emqx_bridge_greptimedb/rebar.config new file mode 100644 index 000000000..cb1385874 --- /dev/null +++ b/apps/emqx_bridge_greptimedb/rebar.config @@ -0,0 +1,13 @@ +{erl_opts, [ + debug_info +]}. + +{deps, [ + {emqx, {path, "../../apps/emqx"}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}}, + {greptimedb_client_erl, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.0"}}} +]}. +{plugins, [rebar3_path_deps]}. +{project_plugins, [erlfmt]}. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src new file mode 100644 index 000000000..f63863d71 --- /dev/null +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src @@ -0,0 +1,14 @@ +{application, emqx_bridge_greptimedb, + [{description, "An OTP library"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, + [kernel, + stdlib + ]}, + {env,[]}, + {modules, []}, + + {licenses, ["Apache-2.0"]}, + {links, []} + ]}. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl new file mode 100644 index 000000000..ffb0e39c7 --- /dev/null +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl @@ -0,0 +1,3 @@ +-module(emqx_bridge_greptimedb). + +-export([]). From c5078980f3dd6d9324079813d1fc36505997d36f Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Tue, 9 May 2023 16:10:14 +0800 Subject: [PATCH 50/73] feat: adds the greptimedb bridge to emqx modules --- apps/emqx_bridge/src/emqx_bridge.erl | 3 ++- .../src/schema/emqx_bridge_enterprise.erl | 16 ++++++++++++++-- .../src/emqx_bridge_greptimedb_connector.erl | 1 + mix.exs | 1 + scripts/spellcheck/dicts/emqx.txt | 1 + 5 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index d5fc42ade..612481663 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -89,7 +89,8 @@ T == pulsar_producer; T == oracle; T == iotdb; - T == kinesis_producer + T == kinesis_producer; + T == greptimedb ). -define(ROOT_KEY, bridges). diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl index e4ef94c9e..7b9e1d4fa 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl @@ -49,7 +49,8 @@ api_schemas(Method) -> api_ref(emqx_bridge_oracle, <<"oracle">>, Method), api_ref(emqx_bridge_iotdb, <<"iotdb">>, Method), api_ref(emqx_bridge_rabbitmq, <<"rabbitmq">>, Method), - api_ref(emqx_bridge_kinesis, <<"kinesis_producer">>, Method ++ "_producer") + api_ref(emqx_bridge_kinesis, <<"kinesis_producer">>, Method ++ "_producer"), + api_ref(emqx_bridge_greptimedb, Method) ]. schema_modules() -> @@ -75,7 +76,8 @@ schema_modules() -> emqx_bridge_oracle, emqx_bridge_iotdb, emqx_bridge_rabbitmq, - emqx_bridge_kinesis + emqx_bridge_kinesis, + emqx_bridge_greptimedb ]. 
examples(Method) -> @@ -122,6 +124,8 @@ resource_type(oracle) -> emqx_oracle; resource_type(iotdb) -> emqx_bridge_iotdb_impl; resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector; resource_type(kinesis_producer) -> emqx_bridge_kinesis_impl_producer. +resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector. +resource_type(greptimedb) -> emqx_bridge_greptimedb_connector. fields(bridges) -> [ @@ -197,6 +201,14 @@ fields(bridges) -> desc => <<"Apache IoTDB Bridge Config">>, required => false } + )}, + {greptimedb, + mk( + hoconsc:map(name, ref(emqx_bridge_greptimedb, "config")), + #{ + desc => <<"GreptimeDB Bridge Config">>, + required => false + } )} ] ++ kafka_structs() ++ pulsar_structs() ++ gcp_pubsub_structs() ++ mongodb_structs() ++ influxdb_structs() ++ diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl new file mode 100644 index 000000000..8f7aa65e2 --- /dev/null +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -0,0 +1 @@ +-module(emqx_bridge_greptimedb_connector). diff --git a/mix.exs b/mix.exs index c6b685893..4d6cf700b 100644 --- a/mix.exs +++ b/mix.exs @@ -171,6 +171,7 @@ defmodule EMQXUmbrella.MixProject do :emqx_bridge_cassandra, :emqx_bridge_opents, :emqx_bridge_dynamo, + :emqx_bridge_greptimedb, :emqx_bridge_hstreamdb, :emqx_bridge_influxdb, :emqx_bridge_iotdb, diff --git a/scripts/spellcheck/dicts/emqx.txt b/scripts/spellcheck/dicts/emqx.txt index 953b0b762..b515a0010 100644 --- a/scripts/spellcheck/dicts/emqx.txt +++ b/scripts/spellcheck/dicts/emqx.txt @@ -29,6 +29,7 @@ EPMD ERL ETS FIXME +GreptimeDB GCM HMAC HOCON From 417e01749815d41562ec991df7413a339891ba0c Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Tue, 9 May 2023 21:34:30 +0800 Subject: [PATCH 51/73] feat: begin to impl connector --- .../src/emqx_bridge_greptimedb.app.src | 26 ++-- .../src/emqx_bridge_greptimedb_connector.erl | 113 ++++++++++++++++++ 2 files changed, 126 insertions(+), 13 deletions(-) diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src index f63863d71..14d655763 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src @@ -1,14 +1,14 @@ -{application, emqx_bridge_greptimedb, - [{description, "An OTP library"}, - {vsn, "0.1.0"}, - {registered, []}, - {applications, - [kernel, - stdlib - ]}, - {env,[]}, - {modules, []}, +{application, emqx_bridge_greptimedb, [ + {description, "An OTP library"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib + ]}, + {env, []}, + {modules, []}, - {licenses, ["Apache-2.0"]}, - {links, []} - ]}. + {licenses, ["Apache-2.0"]}, + {links, []} +]}. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index 8f7aa65e2..17c4d9a3c 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -1 +1,114 @@ -module(emqx_bridge_greptimedb_connector). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% `emqx_resource' API +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_get_status/2, + on_query/3, + on_query_async/4, + on_batch_query/3, + on_batch_query_async/4 +]). 
+ +-define(GREPTIMEDB_DEFAULT_PORT, 4001). + +-define(GREPTIMEDB_HOST_OPTIONS, #{ + default_port => ?GREPTIMEDB_DEFAULT_PORT +}). + +%%------------------------------------------------------------------------------------- +%% `emqx_resource' API +%%------------------------------------------------------------------------------------- +callback_mode() -> async_if_possible. + +on_start(InstId, Config) -> + start_client(InstId, Config). + +on_stop(_InstId, #{client := Client}) -> + greptimedb:stop_client(Client). + +on_get_status(_InstId, _State) -> + %% FIXME + connected. + +on_query(_InstanceId, {send_message, _Message}, _State) -> + todo. + +on_query_async(_InstanceId, {send_message, _Message}, _ReplyFunAndArgs0, _State) -> + todo. + +on_batch_query( + _ResourceID, + _BatchReq, + _State +) -> + todo. + +on_batch_query_async( + _InstId, + _BatchData, + {_ReplyFun, _Args}, + _State +) -> + todo. + +%% internal functions + +start_client(InstId, Config) -> + ClientConfig = client_config(InstId, Config), + ?SLOG(info, #{ + msg => "starting GreptimeDB connector", + connector => InstId, + config => emqx_utils:redact(Config), + client_config => emqx_utils:redact(ClientConfig) + }), + try + case greptimedb:start_client(ClientConfig) of + {ok, Client} -> + {ok, #{client => Client}}; + {error, Reason} -> + ?tp(greptimedb_connector_start_failed, #{error => Reason}), + ?SLOG(warning, #{ + msg => "failed_to_start_greptimedb_connector", + connector => InstId, + reason => Reason + }), + {error, Reason} + end + catch + E:R:S -> + ?tp(greptimedb_connector_start_exception, #{error => {E, R}}), + ?SLOG(warning, #{ + msg => "start greptimedb connector error", + connector => InstId, + error => E, + reason => R, + stack => S + }), + {error, R} + end. + +client_config( + InstId, + _Config = #{ + server := Server + } +) -> + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?GREPTIMEDB_HOST_OPTIONS), + [ + {endpoints, [{http, str(Host), Port}]}, + {pool_size, erlang:system_info(schedulers)}, + {pool, InstId}, + {pool_type, random} + ]. + +str(A) when is_atom(A) -> + atom_to_list(A); +str(B) when is_binary(B) -> + binary_to_list(B); +str(S) when is_list(S) -> + S. From 6d9944a8e8dd37b59265a70b19a37ee52772d092 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Tue, 4 Jul 2023 15:19:14 +0800 Subject: [PATCH 52/73] feat: update greptimedb dependencies --- apps/emqx_bridge_greptimedb/rebar.config | 2 +- .../src/emqx_bridge_greptimedb.app.src | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/emqx_bridge_greptimedb/rebar.config b/apps/emqx_bridge_greptimedb/rebar.config index cb1385874..cbde4660f 100644 --- a/apps/emqx_bridge_greptimedb/rebar.config +++ b/apps/emqx_bridge_greptimedb/rebar.config @@ -7,7 +7,7 @@ {emqx_connector, {path, "../../apps/emqx_connector"}}, {emqx_resource, {path, "../../apps/emqx_resource"}}, {emqx_bridge, {path, "../../apps/emqx_bridge"}}, - {greptimedb_client_erl, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.0"}}} + {greptimedb_client_erl, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.1"}}} ]}. {plugins, [rebar3_path_deps]}. {project_plugins, [erlfmt]}. 
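For reference, this is roughly the option list that client_config/2 above assembles and passes to greptimedb:start_client/1 (a sketch: the endpoint uses the default port defined above, and the pool value, which is just the instance id, is a hypothetical example):

    Opts = [
        {endpoints, [{http, "127.0.0.1", 4001}]},
        {pool_size, erlang:system_info(schedulers)},
        {pool, <<"bridge:greptimedb:demo">>},   %% InstId, hypothetical value
        {pool_type, random}
    ],
    {ok, Client} = greptimedb:start_client(Opts),
    _ = greptimedb:stop_client(Client).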
diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src index 14d655763..f0a07bc28 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src @@ -1,14 +1,14 @@ {application, emqx_bridge_greptimedb, [ - {description, "An OTP library"}, + {description, "EMQX GreptimeDB Bridge"}, {vsn, "0.1.0"}, {registered, []}, {applications, [ kernel, - stdlib + stdlib, + greptimedb ]}, {env, []}, {modules, []}, - {licenses, ["Apache-2.0"]}, {links, []} ]}. From 91ebd90442d9f77ee89f6ac1b0d4f50532feb754 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Thu, 6 Jul 2023 12:12:08 +0800 Subject: [PATCH 53/73] fix: batch write --- apps/emqx_bridge_greptimedb/rebar.config | 9 +- .../src/emqx_bridge_greptimedb.erl | 297 ++++++++- .../src/emqx_bridge_greptimedb_connector.erl | 601 ++++++++++++++++-- .../test/emqx_bridge_greptimedb_tests.erl | 348 ++++++++++ 4 files changed, 1202 insertions(+), 53 deletions(-) create mode 100644 apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_tests.erl diff --git a/apps/emqx_bridge_greptimedb/rebar.config b/apps/emqx_bridge_greptimedb/rebar.config index cbde4660f..952281286 100644 --- a/apps/emqx_bridge_greptimedb/rebar.config +++ b/apps/emqx_bridge_greptimedb/rebar.config @@ -3,11 +3,10 @@ ]}. {deps, [ - {emqx, {path, "../../apps/emqx"}}, - {emqx_connector, {path, "../../apps/emqx_connector"}}, - {emqx_resource, {path, "../../apps/emqx_resource"}}, - {emqx_bridge, {path, "../../apps/emqx_bridge"}}, - {greptimedb_client_erl, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.1"}}} + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}}, + {greptimedb_client_erl, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.1"}}} ]}. {plugins, [rebar3_path_deps]}. {project_plugins, [erlfmt]}. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl index ffb0e39c7..f37ddf320 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl @@ -1,3 +1,298 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- -module(emqx_bridge_greptimedb). --export([]). +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-type write_syntax() :: list(). +-reflect_type([write_syntax/0]). +-typerefl_from_string({write_syntax/0, ?MODULE, to_influx_lines}). +-export([to_influx_lines/1]). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"greptimedb_grpc_v1">> => #{ + summary => <<"Greptimedb HTTP API V2 Bridge">>, + value => values("greptimedb_grpc_v1", Method) + } + } + ]. 
+ +values(Protocol, get) -> + values(Protocol, post); +values("greptimedb_grpc_v1", post) -> + SupportUint = <<"uint_value=${payload.uint_key}u,">>, + TypeOpts = #{ + bucket => <<"example_bucket">>, + org => <<"examlpe_org">>, + token => <<"example_token">>, + server => <<"127.0.0.1:4000">> + }, + values(common, "greptimedb_grpc_v1", SupportUint, TypeOpts); +values(Protocol, put) -> + values(Protocol, post). + +values(common, Protocol, SupportUint, TypeOpts) -> + CommonConfigs = #{ + type => list_to_atom(Protocol), + name => <<"demo">>, + enable => true, + local_topic => <<"local/topic/#">>, + write_syntax => + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", SupportUint/binary, + "bool=${payload.bool}">>, + precision => ms, + resource_opts => #{ + batch_size => 100, + batch_time => <<"20ms">> + }, + server => <<"127.0.0.1:4000">>, + ssl => #{enable => false} + }, + maps:merge(TypeOpts, CommonConfigs). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_greptimedb". + +roots() -> []. + +fields("post_grpc_v1") -> + method_fields(post, greptimedb_grpc_v1); +fields("put_grpc_v1") -> + method_fields(put, greptimedb_grpc_v1); +fields("get_grpc_v1") -> + method_fields(get, greptimedb_grpc_v1); +fields(Type) when + Type == greptimedb_grpc_v1 +-> + greptimedb_bridge_common_fields() ++ + connector_fields(Type). + +method_fields(post, ConnectorType) -> + greptimedb_bridge_common_fields() ++ + connector_fields(ConnectorType) ++ + type_name_fields(ConnectorType); +method_fields(get, ConnectorType) -> + greptimedb_bridge_common_fields() ++ + connector_fields(ConnectorType) ++ + type_name_fields(ConnectorType) ++ + emqx_bridge_schema:status_fields(); +method_fields(put, ConnectorType) -> + greptimedb_bridge_common_fields() ++ + connector_fields(ConnectorType). + +greptimedb_bridge_common_fields() -> + emqx_bridge_schema:common_bridge_fields() ++ + [ + {write_syntax, fun write_syntax/1} + ] ++ + emqx_resource_schema:fields("resource_opts"). + +connector_fields(Type) -> + emqx_bridge_greptimedb_connector:fields(Type). + +type_name_fields(Type) -> + [ + {type, mk(Type, #{required => true, desc => ?DESC("desc_type")})}, + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})} + ]. + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for Greptimedb using `", string:to_upper(Method), "` method."]; +desc(greptimedb_grpc_v1) -> + ?DESC(emqx_bridge_greptimedb_connector, "greptimedb_grpc_v1"); +desc(_) -> + undefined. + +write_syntax(type) -> + ?MODULE:write_syntax(); +write_syntax(required) -> + true; +write_syntax(validator) -> + [?NOT_EMPTY("the value of the field 'write_syntax' cannot be empty")]; +write_syntax(converter) -> + fun to_influx_lines/1; +write_syntax(desc) -> + ?DESC("write_syntax"); +write_syntax(format) -> + <<"sql">>; +write_syntax(_) -> + undefined. + +to_influx_lines(RawLines) -> + try + influx_lines(str(RawLines), []) + catch + _:Reason:Stacktrace -> + Msg = lists:flatten( + io_lib:format("Unable to parse Greptimedb line protocol: ~p", [RawLines]) + ), + ?SLOG(error, #{msg => Msg, error_reason => Reason, stacktrace => Stacktrace}), + throw(Msg) + end. + +-define(MEASUREMENT_ESC_CHARS, [$,, $\s]). +-define(TAG_FIELD_KEY_ESC_CHARS, [$,, $=, $\s]). +-define(FIELD_VAL_ESC_CHARS, [$", $\\]). 
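Since write_syntax uses to_influx_lines/1 as its HOCON converter, a malformed template is rejected when the bridge configuration is checked instead of failing at runtime. A sketch of the failure mode, using one of the invalid inputs covered by the unit tests added later in this series:

    %% "measurement,tag field" has a tag without a value, so parsing fails and
    %% the converter throws the "Unable to parse Greptimedb line protocol: ..."
    %% message logged above.
    emqx_bridge_greptimedb:to_influx_lines("measurement,tag field").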
+% Common separator for both tags and fields +-define(SEP, $\s). +-define(MEASUREMENT_TAG_SEP, $,). +-define(KEY_SEP, $=). +-define(VAL_SEP, $,). +-define(NON_EMPTY, [_ | _]). + +influx_lines([] = _RawLines, Acc) -> + ?NON_EMPTY = lists:reverse(Acc); +influx_lines(RawLines, Acc) -> + {Acc1, RawLines1} = influx_line(string:trim(RawLines, leading, "\s\n"), Acc), + influx_lines(RawLines1, Acc1). + +influx_line([], Acc) -> + {Acc, []}; +influx_line(Line, Acc) -> + {?NON_EMPTY = Measurement, Line1} = measurement(Line), + {Tags, Line2} = tags(Line1), + {?NON_EMPTY = Fields, Line3} = influx_fields(Line2), + {Timestamp, Line4} = timestamp(Line3), + { + [ + #{ + measurement => Measurement, + tags => Tags, + fields => Fields, + timestamp => Timestamp + } + | Acc + ], + Line4 + }. + +measurement(Line) -> + unescape(?MEASUREMENT_ESC_CHARS, [?MEASUREMENT_TAG_SEP, ?SEP], Line, []). + +tags([?MEASUREMENT_TAG_SEP | Line]) -> + tags1(Line, []); +tags(Line) -> + {[], Line}. + +%% Empty line is invalid as fields are required after tags, +%% need to break recursion here and fail later on parsing fields +tags1([] = Line, Acc) -> + {lists:reverse(Acc), Line}; +%% Matching non empty Acc treats lines like "m, field=field_val" invalid +tags1([?SEP | _] = Line, ?NON_EMPTY = Acc) -> + {lists:reverse(Acc), Line}; +tags1(Line, Acc) -> + {Tag, Line1} = tag(Line), + tags1(Line1, [Tag | Acc]). + +tag(Line) -> + {?NON_EMPTY = Key, Line1} = key(Line), + {?NON_EMPTY = Val, Line2} = tag_val(Line1), + {{Key, Val}, Line2}. + +tag_val(Line) -> + {Val, Line1} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?VAL_SEP, ?SEP], Line, []), + {Val, strip_l(Line1, ?VAL_SEP)}. + +influx_fields([?SEP | Line]) -> + fields1(string:trim(Line, leading, "\s"), []). + +%% Timestamp is optional, so fields may be at the very end of the line +fields1([Ch | _] = Line, Acc) when Ch =:= ?SEP; Ch =:= $\n -> + {lists:reverse(Acc), Line}; +fields1([] = Line, Acc) -> + {lists:reverse(Acc), Line}; +fields1(Line, Acc) -> + {Field, Line1} = field(Line), + fields1(Line1, [Field | Acc]). + +field(Line) -> + {?NON_EMPTY = Key, Line1} = key(Line), + {Val, Line2} = field_val(Line1), + {{Key, Val}, Line2}. + +field_val([$" | Line]) -> + {Val, [$" | Line1]} = unescape(?FIELD_VAL_ESC_CHARS, [$"], Line, []), + %% Quoted val can be empty + {Val, strip_l(Line1, ?VAL_SEP)}; +field_val(Line) -> + %% Unquoted value should not be un-escaped according to Greptimedb protocol, + %% as it can only hold float, integer, uinteger or boolean value. + %% However, as templates are possible, un-escaping is applied here, + %% which also helps to detect some invalid lines, e.g.: "m,tag=1 field= ${timestamp}" + {Val, Line1} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?VAL_SEP, ?SEP, $\n], Line, []), + {?NON_EMPTY = Val, strip_l(Line1, ?VAL_SEP)}. + +timestamp([?SEP | Line]) -> + Line1 = string:trim(Line, leading, "\s"), + %% Similarly to unquoted field value, un-escape a timestamp to validate and handle + %% potentially escaped characters in a template + {T, Line2} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?SEP, $\n], Line1, []), + {timestamp1(T), Line2}; +timestamp(Line) -> + {undefined, Line}. + +timestamp1(?NON_EMPTY = Ts) -> Ts; +timestamp1(_Ts) -> undefined. + +%% Common for both tag and field keys +key(Line) -> + {Key, Line1} = unescape(?TAG_FIELD_KEY_ESC_CHARS, [?KEY_SEP], Line, []), + {Key, strip_l(Line1, ?KEY_SEP)}. 
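A worked example of the parser's output (taken from the unit tests added later in this series): each line of the write_syntax becomes one map with the measurement, tag and field templates plus an optional timestamp template:

    emqx_bridge_greptimedb:to_influx_lines("m1,tag=tag1 field=field1 ${timestamp1}").
    %% => [#{measurement => "m1",
    %%       tags => [{"tag", "tag1"}],
    %%       fields => [{"field", "field1"}],
    %%       timestamp => "${timestamp1}"}]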
+ +%% Only strip a character between pairs, don't strip it(and let it fail) +%% if the char to be stripped is at the end, e.g.: m,tag=val, field=val +strip_l([Ch, Ch1 | Str], Ch) when Ch1 =/= ?SEP -> + [Ch1 | Str]; +strip_l(Str, _Ch) -> + Str. + +unescape(EscapeChars, SepChars, [$\\, Char | T], Acc) -> + ShouldEscapeBackslash = lists:member($\\, EscapeChars), + Acc1 = + case lists:member(Char, EscapeChars) of + true -> [Char | Acc]; + false when not ShouldEscapeBackslash -> [Char, $\\ | Acc] + end, + unescape(EscapeChars, SepChars, T, Acc1); +unescape(EscapeChars, SepChars, [Char | T] = L, Acc) -> + IsEscapeChar = lists:member(Char, EscapeChars), + case lists:member(Char, SepChars) of + true -> {lists:reverse(Acc), L}; + false when not IsEscapeChar -> unescape(EscapeChars, SepChars, T, [Char | Acc]) + end; +unescape(_EscapeChars, _SepChars, [] = L, Acc) -> + {lists:reverse(Acc), L}. + +str(A) when is_atom(A) -> + atom_to_list(A); +str(B) when is_binary(B) -> + binary_to_list(B); +str(S) when is_list(S) -> + S. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index 17c4d9a3c..a02df09c5 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -1,84 +1,179 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- -module(emqx_bridge_greptimedb_connector). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). + +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("typerefl/include/types.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -%% `emqx_resource' API +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-behaviour(emqx_resource). + +%% callbacks of behaviour emqx_resource -export([ callback_mode/0, on_start/2, on_stop/2, - on_get_status/2, on_query/3, - on_query_async/4, on_batch_query/3, - on_batch_query_async/4 + on_get_status/2 ]). --define(GREPTIMEDB_DEFAULT_PORT, 4001). +-export([ + roots/0, + namespace/0, + fields/1, + desc/1 +]). + +%% only for test +-export([is_unrecoverable_error/1]). + +-type ts_precision() :: ns | us | ms | s. + +%% Allocatable resources +-define(greptime_client, greptime_client). + +-define(GREPTIMEDB_DEFAULT_PORT, 4000). + +-define(DEFAULT_DB, <<"public">>). -define(GREPTIMEDB_HOST_OPTIONS, #{ default_port => ?GREPTIMEDB_DEFAULT_PORT }). -%%------------------------------------------------------------------------------------- -%% `emqx_resource' API -%%------------------------------------------------------------------------------------- -callback_mode() -> async_if_possible. +-define(DEFAULT_TIMESTAMP_TMPL, "${timestamp}"). + +%% ------------------------------------------------------------------------------------------------- +%% resource callback +callback_mode() -> always_sync. on_start(InstId, Config) -> + %% InstID as pool would be handled by greptimedb client + %% so there is no need to allocate pool_name here + %% See: greptimedb:start_client/1 start_client(InstId, Config). -on_stop(_InstId, #{client := Client}) -> - greptimedb:stop_client(Client). +on_stop(InstId, _State) -> + case emqx_resource:get_allocated_resources(InstId) of + #{?greptime_client := Client} -> + greptimedb:stop_client(Client); + _ -> + ok + end. 
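A note on the allocation pattern used here: start_client/2 further down registers the freshly started client under the instance id, which is what lets on_stop/2 find and stop it even when on_start/2 never returned a state. In short (a sketch, both calls are taken from this patch):

    %% in start_client/2, right after the client starts successfully:
    ok = emqx_resource:allocate_resource(InstId, ?greptime_client, Client),
    %% in on_stop/2, the same client is recovered and shut down:
    #{?greptime_client := Client} = emqx_resource:get_allocated_resources(InstId),
    greptimedb:stop_client(Client).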
-on_get_status(_InstId, _State) -> - %% FIXME - connected. +on_query(InstId, {send_message, Data}, _State = #{write_syntax := SyntaxLines, client := Client}) -> + case data_to_points(Data, SyntaxLines) of + {ok, Points} -> + ?tp( + greptimedb_connector_send_query, + #{points => Points, batch => false, mode => sync} + ), + do_query(InstId, Client, Points); + {error, ErrorPoints} -> + ?tp( + greptimedb_connector_send_query_error, + #{batch => false, mode => sync, error => ErrorPoints} + ), + log_error_points(InstId, ErrorPoints), + {error, {unrecoverable_error, ErrorPoints}} + end. -on_query(_InstanceId, {send_message, _Message}, _State) -> - todo. +%% Once a Batched Data trans to points failed. +%% This batch query failed +on_batch_query(InstId, BatchData, _State = #{write_syntax := SyntaxLines, client := Client}) -> + case parse_batch_data(InstId, BatchData, SyntaxLines) of + {ok, Points} -> + ?tp( + greptimedb_connector_send_query, + #{points => Points, batch => true, mode => sync} + ), + do_query(InstId, Client, Points); + {error, Reason} -> + ?tp( + greptimedb_connector_send_query_error, + #{batch => true, mode => sync, error => Reason} + ), + {error, {unrecoverable_error, Reason}} + end. -on_query_async(_InstanceId, {send_message, _Message}, _ReplyFunAndArgs0, _State) -> - todo. +on_get_status(_InstId, #{client := Client}) -> + case greptimedb:is_alive(Client) of + true -> + connected; + false -> + disconnected + end. -on_batch_query( - _ResourceID, - _BatchReq, - _State -) -> - todo. +%% ------------------------------------------------------------------------------------------------- +%% schema +namespace() -> connector_greptimedb. -on_batch_query_async( - _InstId, - _BatchData, - {_ReplyFun, _Args}, - _State -) -> - todo. +roots() -> + [ + {config, #{ + type => hoconsc:union( + [ + hoconsc:ref(?MODULE, greptimedb_grpc_v1) + ] + ) + }} + ]. +fields(common) -> + [ + {server, server()}, + {precision, + %% The greptimedb only supports these 4 precision: + %% See "https://github.com/influxdata/greptimedb/blob/ + %% 6b607288439a991261307518913eb6d4e280e0a7/models/points.go#L487" for + %% more information. + mk(enum([ns, us, ms, s]), #{ + required => false, default => ms, desc => ?DESC("precision") + })} + ]; +fields(greptimedb_grpc_v1) -> + fields(common) ++ + [ + {dbname, mk(binary(), #{required => true, desc => ?DESC("dbname")})} + ] ++ emqx_connector_schema_lib:ssl_fields(). + +server() -> + Meta = #{ + required => false, + default => <<"127.0.0.1:4000">>, + desc => ?DESC("server"), + converter => fun convert_server/2 + }, + emqx_schema:servers_sc(Meta, ?GREPTIMEDB_HOST_OPTIONS). + +desc(common) -> + ?DESC("common"); +desc(greptimedb_grpc_v1) -> + ?DESC("greptimedb_grpc_v1"). 
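For orientation, a sketch of the request shapes these callbacks receive (the payload values are made up): without batching, on_query/3 gets a single {send_message, Data} tuple; with batching enabled, the resource worker hands on_batch_query/3 a list of them, which parse_batch_data/3 below flattens into one greptimedb:write_batch/2 call:

    %% query_mode = sync, batch_size = 1
    on_query(InstId, {send_message, #{clientid => <<"c1">>, payload => Payload1}}, State),
    %% batch_size > 1
    on_batch_query(InstId,
        [
            {send_message, #{clientid => <<"c1">>, payload => Payload1}},
            {send_message, #{clientid => <<"c2">>, payload => Payload2}}
        ],
        State).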
+ +%% ------------------------------------------------------------------------------------------------- %% internal functions start_client(InstId, Config) -> ClientConfig = client_config(InstId, Config), ?SLOG(info, #{ - msg => "starting GreptimeDB connector", + msg => "starting greptimedb connector", connector => InstId, config => emqx_utils:redact(Config), client_config => emqx_utils:redact(ClientConfig) }), - try - case greptimedb:start_client(ClientConfig) of - {ok, Client} -> - {ok, #{client => Client}}; - {error, Reason} -> - ?tp(greptimedb_connector_start_failed, #{error => Reason}), - ?SLOG(warning, #{ - msg => "failed_to_start_greptimedb_connector", - connector => InstId, - reason => Reason - }), - {error, Reason} - end + try do_start_client(InstId, ClientConfig, Config) of + Res = {ok, #{client := Client}} -> + ok = emqx_resource:allocate_resource(InstId, ?greptime_client, Client), + Res; + {error, Reason} -> + {error, Reason} catch E:R:S -> ?tp(greptimedb_connector_start_exception, #{error => {E, R}}), @@ -92,9 +187,64 @@ start_client(InstId, Config) -> {error, R} end. +do_start_client( + InstId, + ClientConfig, + Config = #{write_syntax := Lines} +) -> + Precision = maps:get(precision, Config, ms), + case greptimedb:start_client(ClientConfig) of + {ok, Client} -> + case greptimedb:is_alive(Client, true) of + true -> + State = #{ + client => Client, + dbname => proplists:get_value(dbname, ClientConfig, ?DEFAULT_DB), + write_syntax => to_config(Lines, Precision) + }, + ?SLOG(info, #{ + msg => "starting greptimedb connector success", + connector => InstId, + client => redact_auth(Client), + state => redact_auth(State) + }), + {ok, State}; + {false, Reason} -> + ?tp(greptimedb_connector_start_failed, #{ + error => greptimedb_client_not_alive, reason => Reason + }), + ?SLOG(warning, #{ + msg => "failed_to_start_greptimedb_connector", + connector => InstId, + client => redact_auth(Client), + reason => Reason + }), + %% no leak + _ = greptimedb:stop_client(Client), + {error, greptimedb_client_not_alive} + end; + {error, {already_started, Client0}} -> + ?tp(greptimedb_connector_start_already_started, #{}), + ?SLOG(info, #{ + msg => "restarting greptimedb connector, found already started client", + connector => InstId, + old_client => redact_auth(Client0) + }), + _ = greptimedb:stop_client(Client0), + do_start_client(InstId, ClientConfig, Config); + {error, Reason} -> + ?tp(greptimedb_connector_start_failed, #{error => Reason}), + ?SLOG(warning, #{ + msg => "failed_to_start_greptimedb_connector", + connector => InstId, + reason => Reason + }), + {error, Reason} + end. + client_config( InstId, - _Config = #{ + Config = #{ server := Server } ) -> @@ -103,12 +253,369 @@ client_config( {endpoints, [{http, str(Host), Port}]}, {pool_size, erlang:system_info(schedulers)}, {pool, InstId}, - {pool_type, random} + {pool_type, random}, + {timeunit, maps:get(precision, Config, ms)} + ] ++ protocol_config(Config). + +protocol_config( + #{ + dbname := DbName, + ssl := SSL + } = Config +) -> + [ + {dbname, DbName} + ] ++ auth(Config) ++ + ssl_config(SSL). + +ssl_config(#{enable := false}) -> + [ + {https_enabled, false} + ]; +ssl_config(SSL = #{enable := true}) -> + [ + {https_enabled, true}, + {transport, ssl}, + {transport_opts, emqx_tls_lib:to_client_opts(SSL)} ]. +auth(#{username := Username, password := Password}) -> + [ + {auth, {basic, #{username => Username, password => Password}}} + ]; +auth(_) -> + []. + +redact_auth(Term) -> + emqx_utils:redact(Term, fun is_auth_key/1). 
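Putting client_config/2, protocol_config/1, auth/1 and ssl_config/1 together, the options handed to greptimedb:start_client/1 end up looking roughly like the sketch below; the credentials are examples, and the TLS branch is shown (with TLS disabled only {https_enabled, false} is appended instead):

    [
        {endpoints, [{http, "127.0.0.1", 4000}]},
        {pool_size, erlang:system_info(schedulers)},
        {pool, InstId},
        {pool_type, random},
        {timeunit, ms},
        {dbname, <<"public">>},
        {auth, {basic, #{username => <<"greptime_user">>, password => <<"greptime_pwd">>}}},
        {https_enabled, true},
        {transport, ssl},
        {transport_opts, emqx_tls_lib:to_client_opts(SSL)}
    ]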
+ +is_auth_key(Key) when is_binary(Key) -> + string:equal("authorization", Key, true); +is_auth_key(_) -> + false. + +%% ------------------------------------------------------------------------------------------------- +%% Query +do_query(InstId, Client, Points) -> + case greptimedb:write_batch(Client, Points) of + {ok, _} -> + ?SLOG(debug, #{ + msg => "greptimedb write point success", + connector => InstId, + points => Points + }); + {error, {401, _, _}} -> + ?tp(greptimedb_connector_do_query_failure, #{error => <<"authorization failure">>}), + ?SLOG(error, #{ + msg => "greptimedb_authorization_failed", + client => redact_auth(Client), + connector => InstId + }), + {error, {unrecoverable_error, <<"authorization failure">>}}; + {error, Reason} = Err -> + ?tp(greptimedb_connector_do_query_failure, #{error => Reason}), + ?SLOG(error, #{ + msg => "greptimedb write point failed", + connector => InstId, + reason => Reason + }), + case is_unrecoverable_error(Err) of + true -> + {error, {unrecoverable_error, Reason}}; + false -> + {error, {recoverable_error, Reason}} + end + end. + +%% ------------------------------------------------------------------------------------------------- +%% Tags & Fields Config Trans + +to_config(Lines, Precision) -> + to_config(Lines, [], Precision). + +to_config([], Acc, _Precision) -> + lists:reverse(Acc); +to_config([Item0 | Rest], Acc, Precision) -> + Ts0 = maps:get(timestamp, Item0, undefined), + {Ts, FromPrecision, ToPrecision} = preproc_tmpl_timestamp(Ts0, Precision), + Item = #{ + measurement => emqx_placeholder:preproc_tmpl(maps:get(measurement, Item0)), + timestamp => Ts, + precision => {FromPrecision, ToPrecision}, + tags => to_kv_config(maps:get(tags, Item0)), + fields => to_kv_config(maps:get(fields, Item0)) + }, + to_config(Rest, [Item | Acc], Precision). + +%% pre-process the timestamp template +%% returns a tuple of three elements: +%% 1. The timestamp template itself. +%% 2. The source timestamp precision (ms if the template ${timestamp} is used). +%% 3. The target timestamp precision (configured for the client). +preproc_tmpl_timestamp(undefined, Precision) -> + %% not configured, we default it to the message timestamp + preproc_tmpl_timestamp(?DEFAULT_TIMESTAMP_TMPL, Precision); +preproc_tmpl_timestamp(Ts, Precision) when is_integer(Ts) -> + %% a const value is used which is very much unusual, but we have to add a special handling + {Ts, Precision, Precision}; +preproc_tmpl_timestamp(Ts, Precision) when is_list(Ts) -> + preproc_tmpl_timestamp(iolist_to_binary(Ts), Precision); +preproc_tmpl_timestamp(<> = Ts, Precision) -> + {emqx_placeholder:preproc_tmpl(Ts), ms, Precision}; +preproc_tmpl_timestamp(Ts, Precision) when is_binary(Ts) -> + %% a placehold is in use. e.g. ${payload.my_timestamp} + %% we can only hope it the value will be of the same precision in the configs + {emqx_placeholder:preproc_tmpl(Ts), Precision, Precision}. + +to_kv_config(KVfields) -> + maps:fold(fun to_maps_config/3, #{}, proplists:to_map(KVfields)). + +to_maps_config(K, V, Res) -> + NK = emqx_placeholder:preproc_tmpl(bin(K)), + NV = emqx_placeholder:preproc_tmpl(bin(V)), + Res#{NK => NV}. 
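A worked example of the timestamp handling above: for the default ${timestamp} template, preproc_tmpl_timestamp/2 records the source precision as ms (the message timestamp is in milliseconds), so with the client configured for precision = ns the value is scaled at write time by maybe_convert_time_unit/2 below:

    erlang:convert_time_unit(1688650000000, millisecond, nanosecond).
    %% => 1688650000000000000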
+ +%% ------------------------------------------------------------------------------------------------- +%% Tags & Fields Data Trans +parse_batch_data(InstId, BatchData, SyntaxLines) -> + {Points, Errors} = lists:foldl( + fun({send_message, Data}, {ListOfPoints, ErrAccIn}) -> + case data_to_points(Data, SyntaxLines) of + {ok, Points} -> + {[Points | ListOfPoints], ErrAccIn}; + {error, ErrorPoints} -> + log_error_points(InstId, ErrorPoints), + {ListOfPoints, ErrAccIn + 1} + end + end, + {[], 0}, + BatchData + ), + case Errors of + 0 -> + {ok, lists:flatten(Points)}; + _ -> + ?SLOG(error, #{ + msg => io_lib:format("Greptimedb trans point failed, count: ~p", [Errors]), + connector => InstId, + reason => points_trans_failed + }), + {error, points_trans_failed} + end. + +-spec data_to_points(map(), [ + #{ + fields := [{binary(), binary()}], + measurement := binary(), + tags := [{binary(), binary()}], + timestamp := emqx_placeholder:tmpl_token() | integer(), + precision := {From :: ts_precision(), To :: ts_precision()} + } +]) -> {ok, [map()]} | {error, term()}. +data_to_points(Data, SyntaxLines) -> + lines_to_points(Data, SyntaxLines, [], []). + +%% When converting multiple rows data into Greptimedb Line Protocol, they are considered to be strongly correlated. +%% And once a row fails to convert, all of them are considered to have failed. +lines_to_points(_, [], Points, ErrorPoints) -> + case ErrorPoints of + [] -> + {ok, Points}; + _ -> + %% ignore trans succeeded points + {error, ErrorPoints} + end; +lines_to_points(Data, [#{timestamp := Ts} = Item | Rest], ResultPointsAcc, ErrorPointsAcc) when + is_list(Ts) +-> + TransOptions = #{return => rawlist, var_trans => fun data_filter/1}, + case parse_timestamp(emqx_placeholder:proc_tmpl(Ts, Data, TransOptions)) of + {ok, TsInt} -> + Item1 = Item#{timestamp => TsInt}, + continue_lines_to_points(Data, Item1, Rest, ResultPointsAcc, ErrorPointsAcc); + {error, BadTs} -> + lines_to_points(Data, Rest, ResultPointsAcc, [ + {error, {bad_timestamp, BadTs}} | ErrorPointsAcc + ]) + end; +lines_to_points(Data, [#{timestamp := Ts} = Item | Rest], ResultPointsAcc, ErrorPointsAcc) when + is_integer(Ts) +-> + continue_lines_to_points(Data, Item, Rest, ResultPointsAcc, ErrorPointsAcc). + +parse_timestamp([TsInt]) when is_integer(TsInt) -> + {ok, TsInt}; +parse_timestamp([TsBin]) -> + try + {ok, binary_to_integer(TsBin)} + catch + _:_ -> + {error, TsBin} + end. + +continue_lines_to_points(Data, Item, Rest, ResultPointsAcc, ErrorPointsAcc) -> + case line_to_point(Data, Item) of + #{fields := Fields} when map_size(Fields) =:= 0 -> + %% greptimedb client doesn't like empty field maps... + ErrorPointsAcc1 = [{error, no_fields} | ErrorPointsAcc], + lines_to_points(Data, Rest, ResultPointsAcc, ErrorPointsAcc1); + Point -> + lines_to_points(Data, Rest, [Point | ResultPointsAcc], ErrorPointsAcc) + end. + +line_to_point( + Data, + #{ + measurement := Measurement, + tags := Tags, + fields := Fields, + timestamp := Ts, + precision := Precision + } = Item +) -> + {_, EncodedTags} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Tags), + {_, EncodedFields} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Fields), + TableName = emqx_placeholder:proc_tmpl(Measurement, Data), + {TableName, [ + maps:without([precision], Item#{ + tags => EncodedTags, + fields => EncodedFields, + timestamp => maybe_convert_time_unit(Ts, Precision) + }) + ]}. 
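Sketch of the result of line_to_point/2 for one message: a pair of the rendered table name and a one-element list holding the point map; the flattened list of such pairs is what do_query/3 passes to greptimedb:write_batch/2:

    {Table, [Point]} = line_to_point(Data, Item),
    %% Table : the measurement template rendered against Data, e.g. <<"t/topic">>
    %% Point : a map carrying the encoded tags and fields plus the timestamp,
    %%         already converted to the target precision via maybe_convert_time_unit/2.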
+ +maybe_convert_time_unit(Ts, {FromPrecision, ToPrecision}) -> + erlang:convert_time_unit(Ts, time_unit(FromPrecision), time_unit(ToPrecision)). + +time_unit(s) -> second; +time_unit(ms) -> millisecond; +time_unit(us) -> microsecond; +time_unit(ns) -> nanosecond. + +maps_config_to_data(K, V, {Data, Res}) -> + KTransOptions = #{return => rawlist, var_trans => fun key_filter/1}, + VTransOptions = #{return => rawlist, var_trans => fun data_filter/1}, + NK0 = emqx_placeholder:proc_tmpl(K, Data, KTransOptions), + NV = emqx_placeholder:proc_tmpl(V, Data, VTransOptions), + case {NK0, NV} of + {[undefined], _} -> + {Data, Res}; + %% undefined value in normal format [undefined] or int/uint format [undefined, <<"i">>] + {_, [undefined | _]} -> + {Data, Res}; + _ -> + NK = list_to_binary(NK0), + {Data, Res#{NK => value_type(NV)}} + end. + +value_type([Int, <<"i">>]) when + is_integer(Int) +-> + greptimedb_values:int64_value(Int); +value_type([UInt, <<"u">>]) when + is_integer(UInt) +-> + greptimedb_values:uint64_value(UInt); +value_type([Float]) when is_float(Float) -> + Float; +value_type([<<"t">>]) -> + greptimedb_values:boolean_value(true); +value_type([<<"T">>]) -> + greptimedb_values:boolean_value(true); +value_type([true]) -> + greptimedb_values:boolean_value(true); +value_type([<<"TRUE">>]) -> + greptimedb_values:boolean_value(true); +value_type([<<"True">>]) -> + greptimedb_values:boolean_value(true); +value_type([<<"f">>]) -> + greptimedb_values:boolean_value(false); +value_type([<<"F">>]) -> + greptimedb_values:boolean_value(false); +value_type([false]) -> + greptimedb_values:boolean_value(false); +value_type([<<"FALSE">>]) -> + greptimedb_values:boolean_value(false); +value_type([<<"False">>]) -> + greptimedb_values:boolean_value(false); +value_type(Val) -> + #{values => #{string_values => Val, datatype => 'STRING'}}. + +key_filter(undefined) -> undefined; +key_filter(Value) -> emqx_utils_conv:bin(Value). + +data_filter(undefined) -> undefined; +data_filter(Int) when is_integer(Int) -> Int; +data_filter(Number) when is_number(Number) -> Number; +data_filter(Bool) when is_boolean(Bool) -> Bool; +data_filter(Data) -> bin(Data). + +bin(Data) -> emqx_utils_conv:bin(Data). + +%% helper funcs +log_error_points(InstId, Errs) -> + lists:foreach( + fun({error, Reason}) -> + ?SLOG(error, #{ + msg => "greptimedb trans point failed", + connector => InstId, + reason => Reason + }) + end, + Errs + ). + +convert_server(<<"http://", Server/binary>>, HoconOpts) -> + convert_server(Server, HoconOpts); +convert_server(<<"https://", Server/binary>>, HoconOpts) -> + convert_server(Server, HoconOpts); +convert_server(Server, HoconOpts) -> + emqx_schema:convert_servers(Server, HoconOpts). + str(A) when is_atom(A) -> atom_to_list(A); str(B) when is_binary(B) -> binary_to_list(B); str(S) when is_list(S) -> S. + +is_unrecoverable_error({error, {unrecoverable_error, _}}) -> + true; +is_unrecoverable_error(_) -> + false. + +%%=================================================================== +%% eunit tests +%%=================================================================== + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). + +is_auth_key_test_() -> + [ + ?_assert(is_auth_key(<<"Authorization">>)), + ?_assertNot(is_auth_key(<<"Something">>)), + ?_assertNot(is_auth_key(89)) + ]. 
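An illustration of the type-suffix mapping implemented by value_type/1 above; the "i"/"u" suffixes originate from the write_syntax template (for example "${payload.int_key}i" and "${payload.uint_key}u"):

    %% value_type([42, <<"i">>])  -> greptimedb_values:int64_value(42)
    %% value_type([42, <<"u">>])  -> greptimedb_values:uint64_value(42)
    %% value_type([1.5])          -> 1.5
    %% value_type([<<"t">>])      -> greptimedb_values:boolean_value(true)
    %% value_type([<<"other">>])  -> a gRPC STRING value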
+ +%% for coverage +desc_test_() -> + [ + ?_assertMatch( + {desc, _, _}, + desc(common) + ), + ?_assertMatch( + {desc, _, _}, + desc(greptimedb_grpc_v1) + ), + ?_assertMatch( + {desc, _, _}, + hocon_schema:field_schema(server(), desc) + ), + ?_assertMatch( + connector_greptimedb, + namespace() + ) + ]. +-endif. diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_tests.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_tests.erl new file mode 100644 index 000000000..a07ccd92d --- /dev/null +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_tests.erl @@ -0,0 +1,348 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_greptimedb_tests). + +-include_lib("eunit/include/eunit.hrl"). + +-define(INVALID_LINES, [ + " ", + " \n", + " \n\n\n ", + "\n", + " \n\n \n \n", + "measurement", + "measurement ", + "measurement,tag", + "measurement field", + "measurement,tag field", + "measurement,tag field ${timestamp}", + "measurement,tag=", + "measurement,tag=tag1", + "measurement,tag =", + "measurement field=", + "measurement field= ", + "measurement field = ", + "measurement, tag = field = ", + "measurement, tag = field = ", + "measurement, tag = tag_val field = field_val", + "measurement, tag = tag_val field = field_val ${timestamp}", + "measurement,= = ${timestamp}", + "measurement,t=a, f=a, ${timestamp}", + "measurement,t=a,t1=b, f=a,f1=b, ${timestamp}", + "measurement,t=a,t1=b, f=a,f1=b,", + "measurement,t=a, t1=b, f=a,f1=b,", + "measurement,t=a,,t1=b, f=a,f1=b,", + "measurement,t=a,,t1=b f=a,,f1=b", + "measurement,t=a,,t1=b f=a,f1=b ${timestamp}", + "measurement, f=a,f1=b", + "measurement, f=a,f1=b ${timestamp}", + "measurement,, f=a,f1=b ${timestamp}", + "measurement,, f=a,f1=b", + "measurement,, f=a,f1=b,, ${timestamp}", + "measurement f=a,f1=b,, ${timestamp}", + "measurement,t=a f=a,f1=b,, ${timestamp}", + "measurement,t=a f=a,f1=b,, ", + "measurement,t=a f=a,f1=b,,", + "measurement, t=a f=a,f1=b", + "measurement,t=a f=a, f1=b", + "measurement,t=a f=a, f1=b ${timestamp}", + "measurement, t=a f=a, f1=b ${timestamp}", + "measurement,t= a f=a,f1=b ${timestamp}", + "measurement,t= a f=a,f1 =b ${timestamp}", + "measurement, t = a f = a,f1 = b ${timestamp}", + "measurement,t=a f=a,f1=b \n ${timestamp}", + "measurement,t=a \n f=a,f1=b \n ${timestamp}", + "measurement,t=a \n f=a,f1=b \n ", + "\n measurement,t=a \n f=a,f1=b \n ${timestamp}", + "\n measurement,t=a \n f=a,f1=b \n", + %% not escaped backslash in a quoted field value is invalid + "measurement,tag=1 field=\"val\\1\"" +]). 
+ +-define(VALID_LINE_PARSED_PAIRS, [ + {"m1,tag=tag1 field=field1 ${timestamp1}", #{ + measurement => "m1", + tags => [{"tag", "tag1"}], + fields => [{"field", "field1"}], + timestamp => "${timestamp1}" + }}, + {"m2,tag=tag2 field=field2", #{ + measurement => "m2", + tags => [{"tag", "tag2"}], + fields => [{"field", "field2"}], + timestamp => undefined + }}, + {"m3 field=field3 ${timestamp3}", #{ + measurement => "m3", + tags => [], + fields => [{"field", "field3"}], + timestamp => "${timestamp3}" + }}, + {"m4 field=field4", #{ + measurement => "m4", + tags => [], + fields => [{"field", "field4"}], + timestamp => undefined + }}, + {"m5,tag=tag5,tag_a=tag5a,tag_b=tag5b field=field5,field_a=field5a,field_b=field5b ${timestamp5}", + #{ + measurement => "m5", + tags => [{"tag", "tag5"}, {"tag_a", "tag5a"}, {"tag_b", "tag5b"}], + fields => [{"field", "field5"}, {"field_a", "field5a"}, {"field_b", "field5b"}], + timestamp => "${timestamp5}" + }}, + {"m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=field6,field_a=field6a,field_b=field6b", #{ + measurement => "m6", + tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}], + fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}], + timestamp => undefined + }}, + {"m7,tag=tag7,tag_a=\"tag7a\",tag_b=tag7b field=\"field7\",field_a=field7a,field_b=\"field7b\"", + #{ + measurement => "m7", + tags => [{"tag", "tag7"}, {"tag_a", "\"tag7a\""}, {"tag_b", "tag7b"}], + fields => [{"field", "field7"}, {"field_a", "field7a"}, {"field_b", "field7b"}], + timestamp => undefined + }}, + {"m8,tag=tag8,tag_a=\"tag8a\",tag_b=tag8b field=\"field8\",field_a=field8a,field_b=\"field8b\" ${timestamp8}", + #{ + measurement => "m8", + tags => [{"tag", "tag8"}, {"tag_a", "\"tag8a\""}, {"tag_b", "tag8b"}], + fields => [{"field", "field8"}, {"field_a", "field8a"}, {"field_b", "field8b"}], + timestamp => "${timestamp8}" + }}, + {"m9,tag=tag9,tag_a=\"tag9a\",tag_b=tag9b field=\"field9\",field_a=field9a,field_b=\"\" ${timestamp9}", + #{ + measurement => "m9", + tags => [{"tag", "tag9"}, {"tag_a", "\"tag9a\""}, {"tag_b", "tag9b"}], + fields => [{"field", "field9"}, {"field_a", "field9a"}, {"field_b", ""}], + timestamp => "${timestamp9}" + }}, + {"m10 field=\"\" ${timestamp10}", #{ + measurement => "m10", + tags => [], + fields => [{"field", ""}], + timestamp => "${timestamp10}" + }} +]). 
+ +-define(VALID_LINE_EXTRA_SPACES_PARSED_PAIRS, [ + {"\n m1,tag=tag1 field=field1 ${timestamp1} \n", #{ + measurement => "m1", + tags => [{"tag", "tag1"}], + fields => [{"field", "field1"}], + timestamp => "${timestamp1}" + }}, + {" m2,tag=tag2 field=field2 ", #{ + measurement => "m2", + tags => [{"tag", "tag2"}], + fields => [{"field", "field2"}], + timestamp => undefined + }}, + {" m3 field=field3 ${timestamp3} ", #{ + measurement => "m3", + tags => [], + fields => [{"field", "field3"}], + timestamp => "${timestamp3}" + }}, + {" \n m4 field=field4\n ", #{ + measurement => "m4", + tags => [], + fields => [{"field", "field4"}], + timestamp => undefined + }}, + {" \n m5,tag=tag5,tag_a=tag5a,tag_b=tag5b field=field5,field_a=field5a,field_b=field5b ${timestamp5} \n", + #{ + measurement => "m5", + tags => [{"tag", "tag5"}, {"tag_a", "tag5a"}, {"tag_b", "tag5b"}], + fields => [{"field", "field5"}, {"field_a", "field5a"}, {"field_b", "field5b"}], + timestamp => "${timestamp5}" + }}, + {" m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=field6,field_a=field6a,field_b=field6b\n ", #{ + measurement => "m6", + tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}], + fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}], + timestamp => undefined + }} +]). + +-define(VALID_LINE_PARSED_ESCAPED_CHARS_PAIRS, [ + {"m\\ =1\\,,\\,tag\\ \\==\\=tag\\ 1\\, \\,fie\\ ld\\ =\\ field\\,1 ${timestamp1}", #{ + measurement => "m =1,", + tags => [{",tag =", "=tag 1,"}], + fields => [{",fie ld ", " field,1"}], + timestamp => "${timestamp1}" + }}, + {"m2,tag=tag2 field=\"field \\\"2\\\",\n\"", #{ + measurement => "m2", + tags => [{"tag", "tag2"}], + fields => [{"field", "field \"2\",\n"}], + timestamp => undefined + }}, + {"m\\ 3 field=\"field3\" ${payload.timestamp\\ 3}", #{ + measurement => "m 3", + tags => [], + fields => [{"field", "field3"}], + timestamp => "${payload.timestamp 3}" + }}, + {"m4 field=\"\\\"field\\\\4\\\"\"", #{ + measurement => "m4", + tags => [], + fields => [{"field", "\"field\\4\""}], + timestamp => undefined + }}, + { + "m5\\,mA,tag=\\=tag5\\=,\\,tag_a\\,=tag\\ 5a,tag_b=tag5b \\ field\\ =field5," + "field\\ _\\ a=field5a,\\,field_b\\ =\\=\\,\\ field5b ${timestamp5}", + #{ + measurement => "m5,mA", + tags => [{"tag", "=tag5="}, {",tag_a,", "tag 5a"}, {"tag_b", "tag5b"}], + fields => [ + {" field ", "field5"}, {"field _ a", "field5a"}, {",field_b ", "=, field5b"} + ], + timestamp => "${timestamp5}" + } + }, + {"m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=\"field6\",field_a=\"field6a\",field_b=\"field6b\"", + #{ + measurement => "m6", + tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}], + fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}], + timestamp => undefined + }}, + { + "\\ \\ m7\\ \\ ,tag=\\ tag\\,7\\ ,tag_a=\"tag7a\",tag_b\\,tag1=tag7b field=\"field7\"," + "field_a=field7a,field_b=\"field7b\\\\\n\"", + #{ + measurement => " m7 ", + tags => [{"tag", " tag,7 "}, {"tag_a", "\"tag7a\""}, {"tag_b,tag1", "tag7b"}], + fields => [{"field", "field7"}, {"field_a", "field7a"}, {"field_b", "field7b\\\n"}], + timestamp => undefined + } + }, + { + "m8,tag=tag8,tag_a=\"tag8a\",tag_b=tag8b field=\"field8\",field_a=field8a," + "field_b=\"\\\"field\\\" = 8b\" ${timestamp8}", + #{ + measurement => "m8", + tags => [{"tag", "tag8"}, {"tag_a", "\"tag8a\""}, {"tag_b", "tag8b"}], + fields => [{"field", "field8"}, {"field_a", "field8a"}, {"field_b", "\"field\" = 8b"}], + timestamp => "${timestamp8}" + } + }, + 
{"m\\9,tag=tag9,tag_a=\"tag9a\",tag_b=tag9b field\\=field=\"field9\",field_a=field9a,field_b=\"\" ${timestamp9}", + #{ + measurement => "m\\9", + tags => [{"tag", "tag9"}, {"tag_a", "\"tag9a\""}, {"tag_b", "tag9b"}], + fields => [{"field=field", "field9"}, {"field_a", "field9a"}, {"field_b", ""}], + timestamp => "${timestamp9}" + }}, + {"m\\,10 \"field\\\\\"=\"\" ${timestamp10}", #{ + measurement => "m,10", + tags => [], + %% backslash should not be un-escaped in tag key + fields => [{"\"field\\\\\"", ""}], + timestamp => "${timestamp10}" + }} +]). + +-define(VALID_LINE_PARSED_ESCAPED_CHARS_EXTRA_SPACES_PAIRS, [ + {" \n m\\ =1\\,,\\,tag\\ \\==\\=tag\\ 1\\, \\,fie\\ ld\\ =\\ field\\,1 ${timestamp1} ", #{ + measurement => "m =1,", + tags => [{",tag =", "=tag 1,"}], + fields => [{",fie ld ", " field,1"}], + timestamp => "${timestamp1}" + }}, + {" m2,tag=tag2 field=\"field \\\"2\\\",\n\" ", #{ + measurement => "m2", + tags => [{"tag", "tag2"}], + fields => [{"field", "field \"2\",\n"}], + timestamp => undefined + }}, + {" m\\ 3 field=\"field3\" ${payload.timestamp\\ 3} ", #{ + measurement => "m 3", + tags => [], + fields => [{"field", "field3"}], + timestamp => "${payload.timestamp 3}" + }}, + {" m4 field=\"\\\"field\\\\4\\\"\" ", #{ + measurement => "m4", + tags => [], + fields => [{"field", "\"field\\4\""}], + timestamp => undefined + }}, + { + " m5\\,mA,tag=\\=tag5\\=,\\,tag_a\\,=tag\\ 5a,tag_b=tag5b \\ field\\ =field5," + "field\\ _\\ a=field5a,\\,field_b\\ =\\=\\,\\ field5b ${timestamp5} ", + #{ + measurement => "m5,mA", + tags => [{"tag", "=tag5="}, {",tag_a,", "tag 5a"}, {"tag_b", "tag5b"}], + fields => [ + {" field ", "field5"}, {"field _ a", "field5a"}, {",field_b ", "=, field5b"} + ], + timestamp => "${timestamp5}" + } + }, + {" m6,tag=tag6,tag_a=tag6a,tag_b=tag6b field=\"field6\",field_a=\"field6a\",field_b=\"field6b\" ", + #{ + measurement => "m6", + tags => [{"tag", "tag6"}, {"tag_a", "tag6a"}, {"tag_b", "tag6b"}], + fields => [{"field", "field6"}, {"field_a", "field6a"}, {"field_b", "field6b"}], + timestamp => undefined + }} +]). + +invalid_write_syntax_line_test_() -> + [?_assertThrow(_, to_influx_lines(L)) || L <- ?INVALID_LINES]. + +invalid_write_syntax_multiline_test_() -> + LinesList = [ + join("\n", ?INVALID_LINES), + join("\n\n\n", ?INVALID_LINES), + join("\n\n", lists:reverse(?INVALID_LINES)) + ], + [?_assertThrow(_, to_influx_lines(Lines)) || Lines <- LinesList]. + +valid_write_syntax_test_() -> + test_pairs(?VALID_LINE_PARSED_PAIRS). + +valid_write_syntax_with_extra_spaces_test_() -> + test_pairs(?VALID_LINE_EXTRA_SPACES_PARSED_PAIRS). + +valid_write_syntax_escaped_chars_test_() -> + test_pairs(?VALID_LINE_PARSED_ESCAPED_CHARS_PAIRS). + +valid_write_syntax_escaped_chars_with_extra_spaces_test_() -> + test_pairs(?VALID_LINE_PARSED_ESCAPED_CHARS_EXTRA_SPACES_PAIRS). + +test_pairs(PairsList) -> + {Lines, AllExpected} = lists:unzip(PairsList), + JoinedLines = join("\n", Lines), + JoinedLines1 = join("\n\n\n", Lines), + JoinedLines2 = join("\n\n", lists:reverse(Lines)), + SingleLineTests = + [ + ?_assertEqual([Expected], to_influx_lines(Line)) + || {Line, Expected} <- PairsList + ], + JoinedLinesTests = + [ + ?_assertEqual(AllExpected, to_influx_lines(JoinedLines)), + ?_assertEqual(AllExpected, to_influx_lines(JoinedLines1)), + ?_assertEqual(lists:reverse(AllExpected), to_influx_lines(JoinedLines2)) + ], + SingleLineTests ++ JoinedLinesTests. + +join(Sep, LinesList) -> + lists:flatten(lists:join(Sep, LinesList)). 
+ +to_influx_lines(RawLines) -> + OldLevel = emqx_logger:get_primary_log_level(), + try + %% mute error logs from this call + emqx_logger:set_primary_log_level(none), + emqx_bridge_greptimedb:to_influx_lines(RawLines) + after + emqx_logger:set_primary_log_level(OldLevel) + end. From 89bce998704a0fedb8f434cee809dd1abf777c76 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Thu, 6 Jul 2023 19:11:20 +0800 Subject: [PATCH 54/73] test: greptimedb data brige --- .tool-versions | 2 +- apps/emqx_bridge/src/emqx_bridge.erl | 2 +- .../src/schema/emqx_bridge_enterprise.erl | 23 +- apps/emqx_bridge_greptimedb/docker-ct | 2 + apps/emqx_bridge_greptimedb/rebar.config | 2 +- .../src/emqx_bridge_greptimedb.erl | 4 +- .../src/emqx_bridge_greptimedb_connector.erl | 20 +- .../test/emqx_bridge_greptimedb_SUITE.erl | 1003 +++++++++++++++++ 8 files changed, 1043 insertions(+), 15 deletions(-) create mode 100644 apps/emqx_bridge_greptimedb/docker-ct create mode 100644 apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl diff --git a/.tool-versions b/.tool-versions index 3a2251dc8..0dbab2a1d 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,2 +1,2 @@ -erlang 25.3.2-1 +erlang 25.3.2.3 elixir 1.14.5-otp-25 diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index 612481663..b60276910 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -90,7 +90,7 @@ T == oracle; T == iotdb; T == kinesis_producer; - T == greptimedb + T == greptimedb_grpc_v1 ). -define(ROOT_KEY, bridges). diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl index 7b9e1d4fa..048dcbf90 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl @@ -50,7 +50,7 @@ api_schemas(Method) -> api_ref(emqx_bridge_iotdb, <<"iotdb">>, Method), api_ref(emqx_bridge_rabbitmq, <<"rabbitmq">>, Method), api_ref(emqx_bridge_kinesis, <<"kinesis_producer">>, Method ++ "_producer"), - api_ref(emqx_bridge_greptimedb, Method) + api_ref(emqx_bridge_greptimedb, <<"greptimedb_grpc_v1">>, Method ++ "_grpc_v1") ]. schema_modules() -> @@ -124,8 +124,7 @@ resource_type(oracle) -> emqx_oracle; resource_type(iotdb) -> emqx_bridge_iotdb_impl; resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector; resource_type(kinesis_producer) -> emqx_bridge_kinesis_impl_producer. -resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector. -resource_type(greptimedb) -> emqx_bridge_greptimedb_connector. +resource_type(greptimedb_grpc_v1) -> emqx_bridge_greptimedb_connector. fields(bridges) -> [ @@ -214,7 +213,8 @@ fields(bridges) -> influxdb_structs() ++ redis_structs() ++ pgsql_structs() ++ clickhouse_structs() ++ sqlserver_structs() ++ rabbitmq_structs() ++ - kinesis_structs(). + kinesis_structs() ++ + greptimedb_structs(). mongodb_structs() -> [ @@ -299,6 +299,21 @@ influxdb_structs() -> ] ]. +greptimedb_structs() -> + [ + {Protocol, + mk( + hoconsc:map(name, ref(emqx_bridge_greptimedb, Protocol)), + #{ + desc => <<"GreptimeDB Bridge Config">>, + required => false + } + )} + || Protocol <- [ + greptimedb_grpc_v1 + ] + ]. 
+ redis_structs() -> [ {Type, diff --git a/apps/emqx_bridge_greptimedb/docker-ct b/apps/emqx_bridge_greptimedb/docker-ct new file mode 100644 index 000000000..1a9647132 --- /dev/null +++ b/apps/emqx_bridge_greptimedb/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +greptimedb diff --git a/apps/emqx_bridge_greptimedb/rebar.config b/apps/emqx_bridge_greptimedb/rebar.config index 952281286..2001a72fc 100644 --- a/apps/emqx_bridge_greptimedb/rebar.config +++ b/apps/emqx_bridge_greptimedb/rebar.config @@ -6,7 +6,7 @@ {emqx_connector, {path, "../../apps/emqx_connector"}}, {emqx_resource, {path, "../../apps/emqx_resource"}}, {emqx_bridge, {path, "../../apps/emqx_bridge"}}, - {greptimedb_client_erl, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.1"}}} + {greptimedb_client_erl, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {branch, "feature/check-auth"}}} ]}. {plugins, [rebar3_path_deps]}. {project_plugins, [erlfmt]}. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl index f37ddf320..5bd8f6852 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl @@ -47,7 +47,7 @@ values("greptimedb_grpc_v1", post) -> bucket => <<"example_bucket">>, org => <<"examlpe_org">>, token => <<"example_token">>, - server => <<"127.0.0.1:4000">> + server => <<"127.0.0.1:4001">> }, values(common, "greptimedb_grpc_v1", SupportUint, TypeOpts); values(Protocol, put) -> @@ -68,7 +68,7 @@ values(common, Protocol, SupportUint, TypeOpts) -> batch_size => 100, batch_time => <<"20ms">> }, - server => <<"127.0.0.1:4000">>, + server => <<"127.0.0.1:4001">>, ssl => #{enable => false} }, maps:merge(TypeOpts, CommonConfigs). diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index a02df09c5..bc4eacbab 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -39,7 +39,7 @@ %% Allocatable resources -define(greptime_client, greptime_client). --define(GREPTIMEDB_DEFAULT_PORT, 4000). +-define(GREPTIMEDB_DEFAULT_PORT, 4001). -define(DEFAULT_DB, <<"public">>). @@ -81,7 +81,7 @@ on_query(InstId, {send_message, Data}, _State = #{write_syntax := SyntaxLines, c #{batch => false, mode => sync, error => ErrorPoints} ), log_error_points(InstId, ErrorPoints), - {error, {unrecoverable_error, ErrorPoints}} + ErrorPoints end. %% Once a Batched Data trans to points failed. @@ -140,13 +140,21 @@ fields(common) -> fields(greptimedb_grpc_v1) -> fields(common) ++ [ - {dbname, mk(binary(), #{required => true, desc => ?DESC("dbname")})} + {dbname, mk(binary(), #{required => true, desc => ?DESC("dbname")})}, + {username, mk(binary(), #{desc => ?DESC("username")})}, + {password, + mk(binary(), #{ + desc => ?DESC("password"), + format => <<"password">>, + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} ] ++ emqx_connector_schema_lib:ssl_fields(). 
server() -> Meta = #{ required => false, - default => <<"127.0.0.1:4000">>, + default => <<"127.0.0.1:4001">>, desc => ?DESC("server"), converter => fun convert_server/2 }, @@ -477,7 +485,7 @@ line_to_point( {_, EncodedFields} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Fields), TableName = emqx_placeholder:proc_tmpl(Measurement, Data), {TableName, [ - maps:without([precision], Item#{ + maps:without([precision, measurement], Item#{ tags => EncodedTags, fields => EncodedFields, timestamp => maybe_convert_time_unit(Ts, Precision) @@ -539,7 +547,7 @@ value_type([<<"FALSE">>]) -> value_type([<<"False">>]) -> greptimedb_values:boolean_value(false); value_type(Val) -> - #{values => #{string_values => Val, datatype => 'STRING'}}. + #{values => #{string_values => Val}, datatype => 'STRING'}. key_filter(undefined) -> undefined; key_filter(Value) -> emqx_utils_conv:bin(Value). diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl new file mode 100644 index 000000000..e694060f5 --- /dev/null +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl @@ -0,0 +1,1003 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_greptimedb_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, with_batch}, + {group, without_batch} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + [ + {with_batch, [ + {group, sync_query} + ]}, + {without_batch, [ + {group, sync_query} + ]}, + {sync_query, [ + {group, grpcv1_tcp}, + {group, grpcv1_tls} + ]}, + {grpcv1_tcp, TCs}, + {grpcv1_tls, TCs} + ]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + delete_all_bridges(), + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_connector_test_helpers:stop_apps([ + emqx_conf, emqx_bridge, emqx_resource, emqx_rule_engine + ]), + _ = application:stop(emqx_connector), + ok. 
+ +init_per_group(GreptimedbType, Config0) when + GreptimedbType =:= grpcv1_tcp; + GreptimedbType =:= grpcv1_tls +-> + #{ + host := GreptimedbHost, + port := GreptimedbPort, + use_tls := UseTLS, + proxy_name := ProxyName + } = + case GreptimedbType of + grpcv1_tcp -> + #{ + host => os:getenv("GREPTIMEDB_GRPCV1_TCP_HOST", "toxiproxy"), + port => list_to_integer(os:getenv("GREPTIMEDB_GRPCV1_TCP_PORT", "4001")), + use_tls => false, + proxy_name => "greptimedb_tcp" + }; + grpcv1_tls -> + #{ + host => os:getenv("GREPTIMEDB_GRPCV1_TLS_HOST", "toxiproxy"), + port => list_to_integer(os:getenv("GREPTIMEDB_GRPCV1_TLS_PORT", "4001")), + use_tls => true, + proxy_name => "greptimedb_tls" + } + end, + case emqx_common_test_helpers:is_tcp_server_available(GreptimedbHost, GreptimedbPort) of + true -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok = start_apps(), + {ok, _} = application:ensure_all_started(emqx_connector), + application:ensure_all_started(greptimedb), + emqx_mgmt_api_test_util:init_suite(), + Config = [{use_tls, UseTLS} | Config0], + {Name, ConfigString, GreptimedbConfig} = greptimedb_config( + grpcv1, GreptimedbHost, GreptimedbPort, Config + ), + EHttpcPoolNameBin = <<(atom_to_binary(?MODULE))/binary, "_grpcv1">>, + EHttpcPoolName = binary_to_atom(EHttpcPoolNameBin), + {EHttpcTransport, EHttpcTransportOpts} = + case UseTLS of + true -> {tls, [{verify, verify_none}]}; + false -> {tcp, []} + end, + EHttpcPoolOpts = [ + {host, GreptimedbHost}, + {port, GreptimedbPort}, + {pool_size, 1}, + {transport, EHttpcTransport}, + {transport_opts, EHttpcTransportOpts} + ], + {ok, _} = ehttpc_sup:start_pool(EHttpcPoolName, EHttpcPoolOpts), + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {proxy_name, ProxyName}, + {greptimedb_host, GreptimedbHost}, + {greptimedb_port, GreptimedbPort}, + {greptimedb_type, grpcv1}, + {greptimedb_config, GreptimedbConfig}, + {greptimedb_config_string, ConfigString}, + {ehttpc_pool_name, EHttpcPoolName}, + {greptimedb_name, Name} + | Config + ]; + false -> + {skip, no_greptimedb} + end; +init_per_group(sync_query, Config) -> + [{query_mode, sync} | Config]; +init_per_group(with_batch, Config) -> + [{batch_size, 100} | Config]; +init_per_group(without_batch, Config) -> + [{batch_size, 1} | Config]; +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when + Group =:= grpcv1_tcp; + Group =:= grpcv1_tls +-> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + EHttpcPoolName = ?config(ehttpc_pool_name, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ehttpc_sup:stop_pool(EHttpcPoolName), + delete_bridge(Config), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_Testcase, Config) -> + delete_all_rules(), + delete_all_bridges(), + Config. + +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + ok = snabbkaffe:stop(), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_rules(), + delete_all_bridges(), + ok. 
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +start_apps() -> + %% some configs in emqx_conf app are mandatory + %% we want to make sure they are loaded before + %% ekka start in emqx_common_test_helpers:start_apps/1 + emqx_common_test_helpers:render_and_load_app_config(emqx_conf), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]). + +example_write_syntax() -> + %% N.B.: this single space character is relevant + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", + "uint_value=${payload.uint_key}u," + "float_value=${payload.float_key},", "undef_value=${payload.undef},", + "${undef_key}=\"hard-coded-value\",", "bool=${payload.bool}">>. + +greptimedb_config(grpcv1 = Type, GreptimedbHost, GreptimedbPort, Config) -> + BatchSize = proplists:get_value(batch_size, Config, 100), + QueryMode = proplists:get_value(query_mode, Config, sync), + UseTLS = proplists:get_value(use_tls, Config, false), + Name = atom_to_binary(?MODULE), + WriteSyntax = example_write_syntax(), + ConfigString = + io_lib:format( + "bridges.greptimedb_grpc_v1.~s {\n" + " enable = true\n" + " server = \"~p:~b\"\n" + " dbname = public\n" + " username = greptime_user\n" + " password = greptime_pwd\n" + " precision = ns\n" + " write_syntax = \"~s\"\n" + " resource_opts = {\n" + " request_ttl = 1s\n" + " query_mode = ~s\n" + " batch_size = ~b\n" + " }\n" + " ssl {\n" + " enable = ~p\n" + " verify = verify_none\n" + " }\n" + "}\n", + [ + Name, + GreptimedbHost, + GreptimedbPort, + WriteSyntax, + QueryMode, + BatchSize, + UseTLS + ] + ), + {Name, ConfigString, parse_and_check(ConfigString, Type, Name)}. + +parse_and_check(ConfigString, Type, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = greptimedb_type_bin(Type), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +greptimedb_type_bin(grpcv1) -> + <<"greptimedb_grpc_v1">>. + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + Type = greptimedb_type_bin(?config(greptimedb_type, Config)), + Name = ?config(greptimedb_name, Config), + GreptimedbConfig0 = ?config(greptimedb_config, Config), + GreptimedbConfig = emqx_utils_maps:deep_merge(GreptimedbConfig0, Overrides), + emqx_bridge:create(Type, Name, GreptimedbConfig). + +delete_bridge(Config) -> + Type = greptimedb_type_bin(?config(greptimedb_type, Config)), + Name = ?config(greptimedb_name, Config), + emqx_bridge:remove(Type, Name). + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +delete_all_rules() -> + lists:foreach( + fun(#{id := RuleId}) -> + ok = emqx_rule_engine:delete_rule(RuleId) + end, + emqx_rule_engine:get_rules() + ). + +create_rule_and_action_http(Config) -> + create_rule_and_action_http(Config, _Overrides = #{}). 
+ +create_rule_and_action_http(Config, Overrides) -> + GreptimedbName = ?config(greptimedb_name, Config), + Type = greptimedb_type_bin(?config(greptimedb_type, Config)), + BridgeId = emqx_bridge_resource:bridge_id(Type, GreptimedbName), + Params0 = #{ + enable => true, + sql => <<"SELECT * FROM \"t/topic\"">>, + actions => [BridgeId] + }, + Params = emqx_utils_maps:deep_merge(Params0, Overrides), + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?config(greptimedb_name, Config), + Type = greptimedb_type_bin(?config(greptimedb_type, Config)), + BridgeId = emqx_bridge_resource:bridge_id(Type, Name), + emqx_bridge:send_message(BridgeId, Payload). + +query_by_clientid(ClientId, Config) -> + GreptimedbHost = ?config(greptimedb_host, Config), + GreptimedbPort = ?config(greptimedb_port, Config), + EHttpcPoolName = ?config(ehttpc_pool_name, Config), + UseTLS = ?config(use_tls, Config), + Path = <<"/api/v2/query?org=emqx">>, + Scheme = + case UseTLS of + true -> <<"https://">>; + false -> <<"http://">> + end, + URI = iolist_to_binary([ + Scheme, + list_to_binary(GreptimedbHost), + ":", + integer_to_binary(GreptimedbPort), + Path + ]), + Query = + << + "from(bucket: \"mqtt\")\n" + " |> range(start: -12h)\n" + " |> filter(fn: (r) => r.clientid == \"", + ClientId/binary, + "\")" + >>, + Headers = [ + {"Authorization", "Token abcdefg"}, + {"Content-Type", "application/json"} + ], + Body = + emqx_utils_json:encode(#{ + query => Query, + dialect => #{ + header => true, + delimiter => <<";">> + } + }), + {ok, 200, _Headers, RawBody0} = + ehttpc:request( + EHttpcPoolName, + post, + {URI, Headers, Body}, + _Timeout = 10_000, + _Retry = 0 + ), + RawBody1 = iolist_to_binary(string:replace(RawBody0, <<"\r\n">>, <<"\n">>, all)), + {ok, DecodedCSV0} = erl_csv:decode(RawBody1, #{separator => <<$;>>}), + DecodedCSV1 = [ + [Field || Field <- Line, Field =/= <<>>] + || Line <- DecodedCSV0, + Line =/= [<<>>] + ], + DecodedCSV2 = csv_lines_to_maps(DecodedCSV1, []), + index_by_field(DecodedCSV2). + +decode_csv(RawBody) -> + Lines = + [ + binary:split(Line, [<<";">>], [global, trim_all]) + || Line <- binary:split(RawBody, [<<"\r\n">>], [global, trim_all]) + ], + csv_lines_to_maps(Lines, []). + +csv_lines_to_maps([Fields, Data | Rest], Acc) -> + Map = maps:from_list(lists:zip(Fields, Data)), + csv_lines_to_maps(Rest, [Map | Acc]); +csv_lines_to_maps(_Data, Acc) -> + lists:reverse(Acc). + +index_by_field(DecodedCSV) -> + maps:from_list([{Field, Data} || Data = #{<<"_field">> := Field} <- DecodedCSV]). + +assert_persisted_data(ClientId, Expected, PersistedData) -> + ClientIdIntKey = <>, + maps:foreach( + fun + (int_value, ExpectedValue) -> + ?assertMatch( + #{<<"_value">> := ExpectedValue}, + maps:get(ClientIdIntKey, PersistedData) + ); + (Key, ExpectedValue) -> + ?assertMatch( + #{<<"_value">> := ExpectedValue}, + maps:get(atom_to_binary(Key), PersistedData), + #{expected => ExpectedValue} + ) + end, + Expected + ), + ok. + +resource_id(Config) -> + Type = greptimedb_type_bin(?config(greptimedb_type, Config)), + Name = ?config(greptimedb_name, Config), + emqx_bridge_resource:resource_id(Type, Name). 
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_start_ok(Config) -> + QueryMode = ?config(query_mode, Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = #{ + int_key => -123, + bool => true, + float_key => 24.5, + uint_key => 123 + }, + SentData = #{ + <<"clientid">> => ClientId, + <<"topic">> => atom_to_binary(?FUNCTION_NAME), + <<"payload">> => Payload, + <<"timestamp">> => erlang:system_time(millisecond) + }, + ?check_trace( + begin + case QueryMode of + sync -> + ?assertMatch(ok, send_message(Config, SentData)) + end, + PersistedData = query_by_clientid(ClientId, Config), + Expected = #{ + bool => <<"true">>, + int_value => <<"-123">>, + uint_value => <<"123">>, + float_value => <<"24.5">>, + payload => emqx_utils_json:encode(Payload) + }, + assert_persisted_data(ClientId, Expected, PersistedData), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(greptimedb_connector_send_query, Trace0), + ?assertMatch([#{points := [_]}], Trace), + [#{points := [Point]}] = Trace, + ct:pal("sent point: ~p", [Point]), + ?assertMatch( + #{ + fields := #{}, + measurement := <<_/binary>>, + tags := #{}, + timestamp := TS + } when is_integer(TS), + Point + ), + #{fields := Fields} = Point, + ?assert(lists:all(fun is_binary/1, maps:keys(Fields))), + ?assertNot(maps:is_key(<<"undefined">>, Fields)), + ?assertNot(maps:is_key(<<"undef_value">>, Fields)), + ok + end + ), + ok. + +t_start_already_started(Config) -> + Type = greptimedb_type_bin(?config(greptimedb_type, Config)), + Name = ?config(greptimedb_name, Config), + GreptimedbConfigString = ?config(greptimedb_config_string, Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + ResourceId = resource_id(Config), + TypeAtom = binary_to_atom(Type), + NameAtom = binary_to_atom(Name), + {ok, #{bridges := #{TypeAtom := #{NameAtom := GreptimedbConfigMap}}}} = emqx_hocon:check( + emqx_bridge_schema, GreptimedbConfigString + ), + ?check_trace( + emqx_bridge_greptimedb_connector:on_start(ResourceId, GreptimedbConfigMap), + fun(Result, Trace) -> + ?assertMatch({ok, _}, Result), + ?assertMatch([_], ?of_kind(greptimedb_connector_start_already_started, Trace)), + ok + end + ), + ok. + +t_start_ok_timestamp_write_syntax(Config) -> + GreptimedbType = ?config(greptimedb_type, Config), + GreptimedbName = ?config(greptimedb_name, Config), + GreptimedbConfigString0 = ?config(greptimedb_config_string, Config), + GreptimedbTypeCfg = + case GreptimedbType of + grpcv1 -> "greptimedb_grpc_v1" + end, + WriteSyntax = + %% N.B.: this single space characters are relevant + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", + "uint_value=${payload.uint_key}u," + "bool=${payload.bool}", " ", "${timestamp}">>, + %% append this to override the config + GreptimedbConfigString1 = + io_lib:format( + "bridges.~s.~s {\n" + " write_syntax = \"~s\"\n" + "}\n", + [GreptimedbTypeCfg, GreptimedbName, WriteSyntax] + ), + GreptimedbConfig1 = parse_and_check( + GreptimedbConfigString0 ++ GreptimedbConfigString1, + GreptimedbType, + GreptimedbName + ), + Config1 = [{greptimedb_config, GreptimedbConfig1} | Config], + ?assertMatch( + {ok, _}, + create_bridge(Config1) + ), + ok. 
+ +t_start_ok_no_subject_tags_write_syntax(Config) -> + GreptimedbType = ?config(greptimedb_type, Config), + GreptimedbName = ?config(greptimedb_name, Config), + GreptimedbConfigString0 = ?config(greptimedb_config_string, Config), + GreptimedbTypeCfg = + case GreptimedbType of + grpcv1 -> "greptimedb_grpc_v1" + end, + WriteSyntax = + %% N.B.: this single space characters are relevant + <<"${topic}", " ", "payload=${payload},", "${clientid}_int_value=${payload.int_key}i,", + "uint_value=${payload.uint_key}u," + "bool=${payload.bool}", " ", "${timestamp}">>, + %% append this to override the config + GreptimedbConfigString1 = + io_lib:format( + "bridges.~s.~s {\n" + " write_syntax = \"~s\"\n" + "}\n", + [GreptimedbTypeCfg, GreptimedbName, WriteSyntax] + ), + GreptimedbConfig1 = parse_and_check( + GreptimedbConfigString0 ++ GreptimedbConfigString1, + GreptimedbType, + GreptimedbName + ), + Config1 = [{greptimedb_config, GreptimedbConfig1} | Config], + ?assertMatch( + {ok, _}, + create_bridge(Config1) + ), + ok. + +t_const_timestamp(Config) -> + QueryMode = ?config(query_mode, Config), + Const = erlang:system_time(nanosecond), + ConstBin = integer_to_binary(Const), + TsStr = iolist_to_binary( + calendar:system_time_to_rfc3339(Const, [{unit, nanosecond}, {offset, "Z"}]) + ), + ?assertMatch( + {ok, _}, + create_bridge( + Config, + #{ + <<"write_syntax">> => + <<"mqtt,clientid=${clientid} foo=${payload.foo}i,bar=5i ", ConstBin/binary>> + } + ) + ), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = #{<<"foo">> => 123}, + SentData = #{ + <<"clientid">> => ClientId, + <<"topic">> => atom_to_binary(?FUNCTION_NAME), + <<"payload">> => Payload, + <<"timestamp">> => erlang:system_time(millisecond) + }, + case QueryMode of + sync -> + ?assertMatch(ok, send_message(Config, SentData)) + end, + PersistedData = query_by_clientid(ClientId, Config), + Expected = #{foo => <<"123">>}, + assert_persisted_data(ClientId, Expected, PersistedData), + TimeReturned0 = maps:get(<<"_time">>, maps:get(<<"foo">>, PersistedData)), + TimeReturned = pad_zero(TimeReturned0), + ?assertEqual(TsStr, TimeReturned). + +%% greptimedb returns timestamps without trailing zeros such as +%% "2023-02-28T17:21:51.63678163Z" +%% while the standard should be +%% "2023-02-28T17:21:51.636781630Z" +pad_zero(BinTs) -> + StrTs = binary_to_list(BinTs), + [Nano | Rest] = lists:reverse(string:tokens(StrTs, ".")), + [$Z | NanoNum] = lists:reverse(Nano), + Padding = lists:duplicate(10 - length(Nano), $0), + NewNano = lists:reverse(NanoNum) ++ Padding ++ "Z", + iolist_to_binary(string:join(lists:reverse([NewNano | Rest]), ".")). 
+ +t_boolean_variants(Config) -> + QueryMode = ?config(query_mode, Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + BoolVariants = #{ + true => true, + false => false, + <<"t">> => true, + <<"f">> => false, + <<"T">> => true, + <<"F">> => false, + <<"TRUE">> => true, + <<"FALSE">> => false, + <<"True">> => true, + <<"False">> => false + }, + maps:foreach( + fun(BoolVariant, Translation) -> + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = #{ + int_key => -123, + bool => BoolVariant, + uint_key => 123 + }, + SentData = #{ + <<"clientid">> => ClientId, + <<"topic">> => atom_to_binary(?FUNCTION_NAME), + <<"timestamp">> => erlang:system_time(millisecond), + <<"payload">> => Payload + }, + case QueryMode of + sync -> + ?assertMatch(ok, send_message(Config, SentData)) + end, + case QueryMode of + sync -> ok + end, + PersistedData = query_by_clientid(ClientId, Config), + Expected = #{ + bool => atom_to_binary(Translation), + int_value => <<"-123">>, + uint_value => <<"123">>, + payload => emqx_utils_json:encode(Payload) + }, + assert_persisted_data(ClientId, Expected, PersistedData), + ok + end, + BoolVariants + ), + ok. + +t_bad_timestamp(Config) -> + GreptimedbType = ?config(greptimedb_type, Config), + GreptimedbName = ?config(greptimedb_name, Config), + QueryMode = ?config(query_mode, Config), + BatchSize = ?config(batch_size, Config), + GreptimedbConfigString0 = ?config(greptimedb_config_string, Config), + GreptimedbTypeCfg = + case GreptimedbType of + grpcv1 -> "greptimedb_grpc_v1" + end, + WriteSyntax = + %% N.B.: this single space characters are relevant + <<"${topic}", " ", "payload=${payload},", "${clientid}_int_value=${payload.int_key}i,", + "uint_value=${payload.uint_key}u," + "bool=${payload.bool}", " ", "bad_timestamp">>, + %% append this to override the config + GreptimedbConfigString1 = + io_lib:format( + "bridges.~s.~s {\n" + " write_syntax = \"~s\"\n" + "}\n", + [GreptimedbTypeCfg, GreptimedbName, WriteSyntax] + ), + GreptimedbConfig1 = parse_and_check( + GreptimedbConfigString0 ++ GreptimedbConfigString1, + GreptimedbType, + GreptimedbName + ), + Config1 = [{greptimedb_config, GreptimedbConfig1} | Config], + ?assertMatch( + {ok, _}, + create_bridge(Config1) + ), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = #{ + int_key => -123, + bool => false, + uint_key => 123 + }, + SentData = #{ + <<"clientid">> => ClientId, + <<"topic">> => atom_to_binary(?FUNCTION_NAME), + <<"timestamp">> => erlang:system_time(millisecond), + <<"payload">> => Payload + }, + ?check_trace( + ?wait_async_action( + send_message(Config1, SentData), + #{?snk_kind := greptimedb_connector_send_query_error}, + 10_000 + ), + fun(Result, _Trace) -> + ?assertMatch({_, {ok, _}}, Result), + {Return, {ok, _}} = Result, + IsBatch = BatchSize > 1, + case {QueryMode, IsBatch} of + {sync, false} -> + ?assertEqual( + {error, [ + {error, {bad_timestamp, <<"bad_timestamp">>}} + ]}, + Return + ); + {sync, true} -> + ?assertEqual({error, {unrecoverable_error, points_trans_failed}}, Return) + end, + ok + end + ), + ok. 
+ +t_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + {ok, _} = create_bridge(Config), + ResourceId = resource_id(Config), + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + ok. + +t_create_disconnected(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ?check_trace( + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch({ok, _}, create_bridge(Config)) + end), + fun(Trace) -> + ?assertMatch( + [#{error := greptimedb_client_not_alive, reason := econnrefused}], + ?of_kind(greptimedb_connector_start_failed, Trace) + ), + ok + end + ), + ok. + +t_start_error(Config) -> + %% simulate client start error + ?check_trace( + emqx_common_test_helpers:with_mock( + greptimedb, + start_client, + fun(_Config) -> {error, some_error} end, + fun() -> + ?wait_async_action( + ?assertMatch({ok, _}, create_bridge(Config)), + #{?snk_kind := greptimedb_connector_start_failed}, + 10_000 + ) + end + ), + fun(Trace) -> + ?assertMatch( + [#{error := some_error}], + ?of_kind(greptimedb_connector_start_failed, Trace) + ), + ok + end + ), + ok. + +t_start_exception(Config) -> + %% simulate client start exception + ?check_trace( + emqx_common_test_helpers:with_mock( + greptimedb, + start_client, + fun(_Config) -> error(boom) end, + fun() -> + ?wait_async_action( + ?assertMatch({ok, _}, create_bridge(Config)), + #{?snk_kind := greptimedb_connector_start_exception}, + 10_000 + ) + end + ), + fun(Trace) -> + ?assertMatch( + [#{error := {error, boom}}], + ?of_kind(greptimedb_connector_start_exception, Trace) + ), + ok + end + ), + ok. + +t_write_failure(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + QueryMode = ?config(query_mode, Config), + {ok, _} = create_bridge(Config), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = #{ + int_key => -123, + bool => true, + float_key => 24.5, + uint_key => 123 + }, + SentData = #{ + <<"clientid">> => ClientId, + <<"topic">> => atom_to_binary(?FUNCTION_NAME), + <<"timestamp">> => erlang:system_time(millisecond), + <<"payload">> => Payload + }, + ?check_trace( + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + case QueryMode of + sync -> + {_, {ok, _}} = + ?wait_async_action( + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + send_message(Config, SentData) + ), + #{?snk_kind := handle_async_reply, action := nack}, + 1_000 + ) + end + end), + fun(Trace0) -> + case QueryMode of + sync -> + Trace = ?of_kind(handle_async_reply, Trace0), + ?assertMatch([_ | _], Trace), + [#{result := Result} | _] = Trace, + ?assert( + not emqx_bridge_greptimedb_connector:is_unrecoverable_error(Result), + #{got => Result} + ) + end, + ok + end + ), + ok. 
+ +t_missing_field(Config) -> + BatchSize = ?config(batch_size, Config), + IsBatch = BatchSize > 1, + {ok, _} = + create_bridge( + Config, + #{ + <<"resource_opts">> => #{<<"worker_pool_size">> => 1}, + <<"write_syntax">> => <<"${clientid} foo=${foo}i">> + } + ), + %% note: we don't select foo here, but we interpolate it in the + %% fields, so it'll become undefined. + {ok, _} = create_rule_and_action_http(Config, #{sql => <<"select * from \"t/topic\"">>}), + ClientId0 = emqx_guid:to_hexstr(emqx_guid:gen()), + ClientId1 = emqx_guid:to_hexstr(emqx_guid:gen()), + %% Message with the field that we "forgot" to select in the rule + Msg0 = emqx_message:make(ClientId0, <<"t/topic">>, emqx_utils_json:encode(#{foo => 123})), + %% Message without any fields + Msg1 = emqx_message:make(ClientId1, <<"t/topic">>, emqx_utils_json:encode(#{})), + ?check_trace( + begin + emqx:publish(Msg0), + emqx:publish(Msg1), + NEvents = 1, + {ok, _} = + snabbkaffe:block_until( + ?match_n_events(NEvents, #{ + ?snk_kind := greptimedb_connector_send_query_error + }), + _Timeout1 = 10_000 + ), + ok + end, + fun(Trace) -> + PersistedData0 = query_by_clientid(ClientId0, Config), + PersistedData1 = query_by_clientid(ClientId1, Config), + case IsBatch of + true -> + ?assertMatch( + [#{error := points_trans_failed} | _], + ?of_kind(greptimedb_connector_send_query_error, Trace) + ); + false -> + ?assertMatch( + [#{error := [{error, no_fields}]} | _], + ?of_kind(greptimedb_connector_send_query_error, Trace) + ) + end, + %% nothing should have been persisted + ?assertEqual(#{}, PersistedData0), + ?assertEqual(#{}, PersistedData1), + ok + end + ), + ok. + +t_authentication_error(Config0) -> + GreptimedbType = ?config(greptimedb_type, Config0), + GreptimeConfig0 = proplists:get_value(greptimedb_config, Config0), + GreptimeConfig = + case GreptimedbType of + grpcv1 -> GreptimeConfig0#{<<"password">> => <<"wrong_password">>} + end, + Config = lists:keyreplace(greptimedb_config, 1, Config0, {greptimedb_config, GreptimeConfig}), + ?check_trace( + begin + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := greptimedb_connector_start_failed}, + 10_000 + ) + end, + fun(Trace) -> + ?assertMatch( + [#{error := auth_error} | _], + ?of_kind(greptimedb_connector_start_failed, Trace) + ), + ok + end + ), + ok. + +t_authentication_error_on_get_status(Config0) -> + ResourceId = resource_id(Config0), + + % Fake initialization to simulate credential update after bridge was created. + emqx_common_test_helpers:with_mock( + greptimedb, + check_auth, + fun(_) -> + ok + end, + fun() -> + GreptimedbType = ?config(greptimedb_type, Config0), + GreptimeConfig0 = proplists:get_value(greptimedb_config, Config0), + GreptimeConfig = + case GreptimedbType of + grpcv1 -> GreptimeConfig0#{<<"password">> => <<"wrong_password">>} + end, + Config = lists:keyreplace( + greptimedb_config, 1, Config0, {greptimedb_config, GreptimeConfig} + ), + {ok, _} = create_bridge(Config), + ?retry( + _Sleep = 1_000, + _Attempts = 10, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ) + end + ), + + % Now back to wrong credentials + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)), + ok. 
+ +t_authentication_error_on_send_message(Config0) -> + ResourceId = resource_id(Config0), + QueryMode = proplists:get_value(query_mode, Config0, sync), + GreptimedbType = ?config(greptimedb_type, Config0), + GreptimeConfig0 = proplists:get_value(greptimedb_config, Config0), + GreptimeConfig = + case GreptimedbType of + grpcv1 -> GreptimeConfig0#{<<"password">> => <<"wrong_password">>} + end, + Config = lists:keyreplace(greptimedb_config, 1, Config0, {greptimedb_config, GreptimeConfig}), + + % Fake initialization to simulate credential update after bridge was created. + emqx_common_test_helpers:with_mock( + greptimedb, + check_auth, + fun(_) -> + ok + end, + fun() -> + {ok, _} = create_bridge(Config), + ?retry( + _Sleep = 1_000, + _Attempts = 10, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ) + end + ), + + % Now back to wrong credentials + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = #{ + int_key => -123, + bool => true, + float_key => 24.5, + uint_key => 123 + }, + SentData = #{ + <<"clientid">> => ClientId, + <<"topic">> => atom_to_binary(?FUNCTION_NAME), + <<"timestamp">> => erlang:system_time(millisecond), + <<"payload">> => Payload + }, + case QueryMode of + sync -> + ?assertMatch( + {error, {unrecoverable_error, <<"authorization failure">>}}, + send_message(Config, SentData) + ) + end, + ok. From 975795a6e0d21069d30d57b33144ab5c94b6ce3f Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Fri, 7 Jul 2023 19:27:21 +0800 Subject: [PATCH 55/73] feat: add ci test --- .../docker-compose-greptimedb.yaml | 15 ++++++ .../docker-compose-toxiproxy.yaml | 3 ++ .ci/docker-compose-file/toxiproxy.json | 11 ++++ .../src/schema/emqx_bridge_enterprise.erl | 8 --- .../src/emqx_bridge_greptimedb.erl | 1 + .../test/emqx_bridge_greptimedb_SUITE.erl | 9 ++-- rel/i18n/emqx_bridge_greptimedb.hocon | 50 +++++++++++++++++++ .../emqx_bridge_greptimedb_connector.hocon | 47 +++++++++++++++++ scripts/ct/run.sh | 3 ++ 9 files changed, 135 insertions(+), 12 deletions(-) create mode 100644 .ci/docker-compose-file/docker-compose-greptimedb.yaml create mode 100644 rel/i18n/emqx_bridge_greptimedb.hocon create mode 100644 rel/i18n/emqx_bridge_greptimedb_connector.hocon diff --git a/.ci/docker-compose-file/docker-compose-greptimedb.yaml b/.ci/docker-compose-file/docker-compose-greptimedb.yaml new file mode 100644 index 000000000..f379969bd --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-greptimedb.yaml @@ -0,0 +1,15 @@ +version: '3.9' + +services: + greptimedb: + container_name: greptimedb + image: greptime/greptimedb:0.3.2 + expose: + - "4000" + - "4001" + restart: always + networks: + - emqx_bridge + command: + standalone start + --user-provider=static_user_provider:cmd:greptime_user=greptime_pwd diff --git a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml index 74d2583c9..d648d9d78 100644 --- a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml +++ b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml @@ -51,6 +51,9 @@ services: - 15670:5670 # Kinesis - 4566:4566 + # GreptimeDB + - 4000:4000 + - 4001:4001 command: - "-host=0.0.0.0" - "-config=/config/toxiproxy.json" diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json index c9590354b..a8e2f086c 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -160,6 +160,17 @@ "name": "hstreamdb", "listen": "0.0.0.0:6570", "upstream": "hstreamdb:6570", + }, + { + 
"name": "greptimedb_http", + "listen": "0.0.0.0:4000", + "upstream": "iotdb:4000", + "enabled": true + }, + { + "name": "greptimedb_grpc", + "listen": "0.0.0.0:4001", + "upstream": "iotdb:4001", "enabled": true }, { diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl index 048dcbf90..4a0428675 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl @@ -200,14 +200,6 @@ fields(bridges) -> desc => <<"Apache IoTDB Bridge Config">>, required => false } - )}, - {greptimedb, - mk( - hoconsc:map(name, ref(emqx_bridge_greptimedb, "config")), - #{ - desc => <<"GreptimeDB Bridge Config">>, - required => false - } )} ] ++ kafka_structs() ++ pulsar_structs() ++ gcp_pubsub_structs() ++ mongodb_structs() ++ influxdb_structs() ++ diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl index 5bd8f6852..415544fcf 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl @@ -107,6 +107,7 @@ method_fields(put, ConnectorType) -> greptimedb_bridge_common_fields() -> emqx_bridge_schema:common_bridge_fields() ++ [ + {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})}, {write_syntax, fun write_syntax/1} ] ++ emqx_resource_schema:fields("resource_opts"). diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl index e694060f5..57ffed926 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl @@ -30,11 +30,12 @@ groups() -> {group, sync_query} ]}, {sync_query, [ - {group, grpcv1_tcp}, - {group, grpcv1_tls} + {group, grpcv1_tcp} + %% uncomment tls when we are ready + %% {group, grpcv1_tls} ]}, - {grpcv1_tcp, TCs}, - {grpcv1_tls, TCs} + {grpcv1_tcp, TCs} + %%{grpcv1_tls, TCs} ]. init_per_suite(Config) -> diff --git a/rel/i18n/emqx_bridge_greptimedb.hocon b/rel/i18n/emqx_bridge_greptimedb.hocon new file mode 100644 index 000000000..939ed48d3 --- /dev/null +++ b/rel/i18n/emqx_bridge_greptimedb.hocon @@ -0,0 +1,50 @@ +emqx_bridge_greptimedb { + +config_enable.desc: +"""Enable or disable this bridge.""" + +config_enable.label: +"""Enable Or Disable Bridge""" + +desc_config.desc: +"""Configuration for an GreptimeDB bridge.""" + +desc_config.label: +"""GreptimeDB Bridge Configuration""" + +desc_name.desc: +"""Bridge name.""" + +desc_name.label: +"""Bridge Name""" + +desc_type.desc: +"""The Bridge Type.""" + +desc_type.label: +"""Bridge Type""" + +local_topic.desc: +"""The MQTT topic filter to be forwarded to the GreptimeDB. All MQTT 'PUBLISH' messages with the topic +matching the local_topic will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine) and local_topic is also +configured, then both the data from the rule and the MQTT messages that match local_topic +will be forwarded.""" + +local_topic.label: +"""Local Topic""" + +write_syntax.desc: +"""Configuration of the GreptimeDB gRPC protocol for writing data points. The write syntax is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point; placeholders are supported. It is the same as the InfluxDB line protocol. +See also [InfluxDB 2.3 Line Protocol](https://docs.influxdata.com/influxdb/v2.3/reference/syntax/line-protocol/) and +[InfluxDB 1.8 Line Protocol](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/)
+TLDR:
+``` +<measurement>[,<tag_key>=<tag_value>[,<tag_key>=<tag_value>]] <field_key>=<field_value>[,<field_key>=<field_value>] [<timestamp>] +``` +Please note that a placeholder for an integer value must be annotated with a suffix `i`. For example `${payload.int_value}i`.""" + +write_syntax.label: +"""Write Syntax""" + +} diff --git a/rel/i18n/emqx_bridge_greptimedb_connector.hocon b/rel/i18n/emqx_bridge_greptimedb_connector.hocon new file mode 100644 index 000000000..87370b211 --- /dev/null +++ b/rel/i18n/emqx_bridge_greptimedb_connector.hocon @@ -0,0 +1,47 @@ +emqx_bridge_greptimedb_connector { + +dbname.desc: +"""GreptimeDB database.""" + +dbname.label: +"""Database""" + +greptimedb_grpc_v1.desc: +"""GreptimeDB's protocol. Support GreptimeDB v1.8 and before.""" + +greptimedb_grpc_v1.label: +"""HTTP API Protocol""" + +password.desc: +"""GreptimeDB password.""" + +password.label: +"""Password""" + +precision.desc: +"""GreptimeDB time precision.""" + +precision.label: +"""Time Precision""" + +protocol.desc: +"""GreptimeDB's protocol. gRPC API.""" + +protocol.label: +"""Protocol""" + +server.desc: +"""The IPv4 or IPv6 address or the hostname to connect to.
+A host entry has the following form: `Host[:Port]`.
+The GreptimeDB default port 8086 is used if `[:Port]` is not specified.""" + +server.label: +"""Server Host""" + +username.desc: +"""GreptimeDB username.""" + +username.label: +"""Username""" + +} diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index 785d4065d..578b9c4de 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -222,6 +222,9 @@ for dep in ${CT_DEPS}; do kinesis) FILES+=( '.ci/docker-compose-file/docker-compose-kinesis.yaml' ) ;; + greptimedb) + FILES+=( '.ci/docker-compose-file/docker-compose-greptimedb.yaml' ) + ;; *) echo "unknown_ct_dependency $dep" exit 1 From c6a7f3e2ade7769960b560ccd932593cca437c62 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Sun, 16 Jul 2023 17:46:04 +0800 Subject: [PATCH 56/73] test: make test passed 21/29 --- .../docker-compose-greptimedb.yaml | 7 + .ci/docker-compose-file/toxiproxy.json | 5 +- .../src/emqx_bridge_greptimedb_connector.erl | 2 +- .../test/emqx_bridge_greptimedb_SUITE.erl | 200 +++++------------- 4 files changed, 69 insertions(+), 145 deletions(-) diff --git a/.ci/docker-compose-file/docker-compose-greptimedb.yaml b/.ci/docker-compose-file/docker-compose-greptimedb.yaml index f379969bd..8980c946d 100644 --- a/.ci/docker-compose-file/docker-compose-greptimedb.yaml +++ b/.ci/docker-compose-file/docker-compose-greptimedb.yaml @@ -3,13 +3,20 @@ version: '3.9' services: greptimedb: container_name: greptimedb + hostname: greptimedb image: greptime/greptimedb:0.3.2 expose: - "4000" - "4001" + # uncomment for local testing + # ports: + # - "4000:4000" + # - "4001:4001" restart: always networks: - emqx_bridge command: standalone start --user-provider=static_user_provider:cmd:greptime_user=greptime_pwd + --http-addr="0.0.0.0:4000" + --rpc-addr="0.0.0.0:4001" diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json index a8e2f086c..f5df5a853 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -160,17 +160,18 @@ "name": "hstreamdb", "listen": "0.0.0.0:6570", "upstream": "hstreamdb:6570", + "enabled": true }, { "name": "greptimedb_http", "listen": "0.0.0.0:4000", - "upstream": "iotdb:4000", + "upstream": "greptimedb:4000", "enabled": true }, { "name": "greptimedb_grpc", "listen": "0.0.0.0:4001", - "upstream": "iotdb:4001", + "upstream": "greptimedb:4001", "enabled": true }, { diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index bc4eacbab..43455d5d2 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -312,7 +312,7 @@ do_query(InstId, Client, Points) -> connector => InstId, points => Points }); - {error, {401, _, _}} -> + {error, {unauth, _, _}} -> ?tp(greptimedb_connector_do_query_failure, #{error => <<"authorization failure">>}), ?SLOG(error, #{ msg => "greptimedb_authorization_failed", diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl index 57ffed926..044d6a2bd 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl @@ -57,6 +57,7 @@ init_per_group(GreptimedbType, Config0) when #{ host := GreptimedbHost, port := GreptimedbPort, + http_port := GreptimedbHttpPort, use_tls := UseTLS, proxy_name := ProxyName } = @@ -65,13 +66,15 @@ 
init_per_group(GreptimedbType, Config0) when #{ host => os:getenv("GREPTIMEDB_GRPCV1_TCP_HOST", "toxiproxy"), port => list_to_integer(os:getenv("GREPTIMEDB_GRPCV1_TCP_PORT", "4001")), + http_port => list_to_integer(os:getenv("GREPTIMEDB_HTTP_PORT", "4000")), use_tls => false, - proxy_name => "greptimedb_tcp" + proxy_name => "greptimedb_grpc" }; grpcv1_tls -> #{ host => os:getenv("GREPTIMEDB_GRPCV1_TLS_HOST", "toxiproxy"), port => list_to_integer(os:getenv("GREPTIMEDB_GRPCV1_TLS_PORT", "4001")), + http_port => list_to_integer(os:getenv("GREPTIMEDB_HTTP_PORT", "4000")), use_tls => true, proxy_name => "greptimedb_tls" } @@ -98,7 +101,7 @@ init_per_group(GreptimedbType, Config0) when end, EHttpcPoolOpts = [ {host, GreptimedbHost}, - {port, GreptimedbPort}, + {port, GreptimedbHttpPort}, {pool_size, 1}, {transport, EHttpcTransport}, {transport_opts, EHttpcTransportOpts} @@ -110,6 +113,7 @@ init_per_group(GreptimedbType, Config0) when {proxy_name, ProxyName}, {greptimedb_host, GreptimedbHost}, {greptimedb_port, GreptimedbPort}, + {greptimedb_http_port, GreptimedbHttpPort}, {greptimedb_type, grpcv1}, {greptimedb_config, GreptimedbConfig}, {greptimedb_config_string, ConfigString}, @@ -282,12 +286,12 @@ send_message(Config, Payload) -> BridgeId = emqx_bridge_resource:bridge_id(Type, Name), emqx_bridge:send_message(BridgeId, Payload). -query_by_clientid(ClientId, Config) -> +query_by_clientid(Topic, ClientId, Config) -> GreptimedbHost = ?config(greptimedb_host, Config), - GreptimedbPort = ?config(greptimedb_port, Config), + GreptimedbPort = ?config(greptimedb_http_port, Config), EHttpcPoolName = ?config(ehttpc_pool_name, Config), UseTLS = ?config(use_tls, Config), - Path = <<"/api/v2/query?org=emqx">>, + Path = <<"/v1/sql?db=public">>, Scheme = case UseTLS of true -> <<"https://">>; @@ -300,26 +304,11 @@ query_by_clientid(ClientId, Config) -> integer_to_binary(GreptimedbPort), Path ]), - Query = - << - "from(bucket: \"mqtt\")\n" - " |> range(start: -12h)\n" - " |> filter(fn: (r) => r.clientid == \"", - ClientId/binary, - "\")" - >>, Headers = [ - {"Authorization", "Token abcdefg"}, - {"Content-Type", "application/json"} + {"Authorization", "Basic Z3JlcHRpbWVfdXNlcjpncmVwdGltZV9wd2Q="}, + {"Content-Type", "application/x-www-form-urlencoded"} ], - Body = - emqx_utils_json:encode(#{ - query => Query, - dialect => #{ - header => true, - delimiter => <<";">> - } - }), + Body = <<"sql=select * from ", Topic/binary, " where clientid='", ClientId/binary, "'">>, {ok, 200, _Headers, RawBody0} = ehttpc:request( EHttpcPoolName, @@ -328,32 +317,30 @@ query_by_clientid(ClientId, Config) -> _Timeout = 10_000, _Retry = 0 ), - RawBody1 = iolist_to_binary(string:replace(RawBody0, <<"\r\n">>, <<"\n">>, all)), - {ok, DecodedCSV0} = erl_csv:decode(RawBody1, #{separator => <<$;>>}), - DecodedCSV1 = [ - [Field || Field <- Line, Field =/= <<>>] - || Line <- DecodedCSV0, - Line =/= [<<>>] - ], - DecodedCSV2 = csv_lines_to_maps(DecodedCSV1, []), - index_by_field(DecodedCSV2). + #{ + <<"code">> := 0, + <<"output">> := [ + #{ + <<"records">> := #{ + <<"rows">> := Rows, + <<"schema">> := Schema + } + } + ] + } = emqx_utils_json:decode(RawBody0, [return_maps]), -decode_csv(RawBody) -> - Lines = - [ - binary:split(Line, [<<";">>], [global, trim_all]) - || Line <- binary:split(RawBody, [<<"\r\n">>], [global, trim_all]) - ], - csv_lines_to_maps(Lines, []). 
+ case Schema of + null -> + #{}; + #{<<"column_schemas">> := ColumnsSchemas} -> + Columns = lists:map(fun(#{<<"name">> := Name}) -> Name end, ColumnsSchemas), + index_by_field(Rows, Columns) + end. -csv_lines_to_maps([Fields, Data | Rest], Acc) -> - Map = maps:from_list(lists:zip(Fields, Data)), - csv_lines_to_maps(Rest, [Map | Acc]); -csv_lines_to_maps(_Data, Acc) -> - lists:reverse(Acc). - -index_by_field(DecodedCSV) -> - maps:from_list([{Field, Data} || Data = #{<<"_field">> := Field} <- DecodedCSV]). +index_by_field([], Columns) -> + #{}; +index_by_field([Row], Columns) -> + maps:from_list(lists:zip(Columns, Row)). assert_persisted_data(ClientId, Expected, PersistedData) -> ClientIdIntKey = <>, @@ -361,12 +348,12 @@ assert_persisted_data(ClientId, Expected, PersistedData) -> fun (int_value, ExpectedValue) -> ?assertMatch( - #{<<"_value">> := ExpectedValue}, + ExpectedValue, maps:get(ClientIdIntKey, PersistedData) ); (Key, ExpectedValue) -> ?assertMatch( - #{<<"_value">> := ExpectedValue}, + ExpectedValue, maps:get(atom_to_binary(Key), PersistedData), #{expected => ExpectedValue} ) @@ -409,12 +396,12 @@ t_start_ok(Config) -> sync -> ?assertMatch(ok, send_message(Config, SentData)) end, - PersistedData = query_by_clientid(ClientId, Config), + PersistedData = query_by_clientid(atom_to_binary(?FUNCTION_NAME), ClientId, Config), Expected = #{ - bool => <<"true">>, - int_value => <<"-123">>, - uint_value => <<"123">>, - float_value => <<"24.5">>, + bool => true, + int_value => -123, + uint_value => 123, + float_value => 24.5, payload => emqx_utils_json:encode(Payload) }, assert_persisted_data(ClientId, Expected, PersistedData), @@ -423,12 +410,16 @@ t_start_ok(Config) -> fun(Trace0) -> Trace = ?of_kind(greptimedb_connector_send_query, Trace0), ?assertMatch([#{points := [_]}], Trace), - [#{points := [Point]}] = Trace, + [#{points := [Point0]}] = Trace, + {Measurement, [Point]} = Point0, ct:pal("sent point: ~p", [Point]), + ?assertMatch( + <<_/binary>>, + Measurement + ), ?assertMatch( #{ fields := #{}, - measurement := <<_/binary>>, tags := #{}, timestamp := TS } when is_integer(TS), @@ -538,9 +529,6 @@ t_const_timestamp(Config) -> QueryMode = ?config(query_mode, Config), Const = erlang:system_time(nanosecond), ConstBin = integer_to_binary(Const), - TsStr = iolist_to_binary( - calendar:system_time_to_rfc3339(Const, [{unit, nanosecond}, {offset, "Z"}]) - ), ?assertMatch( {ok, _}, create_bridge( @@ -563,24 +551,11 @@ t_const_timestamp(Config) -> sync -> ?assertMatch(ok, send_message(Config, SentData)) end, - PersistedData = query_by_clientid(ClientId, Config), - Expected = #{foo => <<"123">>}, + PersistedData = query_by_clientid(<<"mqtt">>, ClientId, Config), + Expected = #{foo => 123}, assert_persisted_data(ClientId, Expected, PersistedData), - TimeReturned0 = maps:get(<<"_time">>, maps:get(<<"foo">>, PersistedData)), - TimeReturned = pad_zero(TimeReturned0), - ?assertEqual(TsStr, TimeReturned). - -%% greptimedb returns timestamps without trailing zeros such as -%% "2023-02-28T17:21:51.63678163Z" -%% while the standard should be -%% "2023-02-28T17:21:51.636781630Z" -pad_zero(BinTs) -> - StrTs = binary_to_list(BinTs), - [Nano | Rest] = lists:reverse(string:tokens(StrTs, ".")), - [$Z | NanoNum] = lists:reverse(Nano), - Padding = lists:duplicate(10 - length(Nano), $0), - NewNano = lists:reverse(NanoNum) ++ Padding ++ "Z", - iolist_to_binary(string:join(lists:reverse([NewNano | Rest]), ".")). + TimeReturned = maps:get(<<"greptime_timestamp">>, PersistedData), + ?assertEqual(Const, TimeReturned). 
t_boolean_variants(Config) -> QueryMode = ?config(query_mode, Config), @@ -621,11 +596,11 @@ t_boolean_variants(Config) -> case QueryMode of sync -> ok end, - PersistedData = query_by_clientid(ClientId, Config), + PersistedData = query_by_clientid(atom_to_binary(?FUNCTION_NAME), ClientId, Config), Expected = #{ - bool => atom_to_binary(Translation), - int_value => <<"-123">>, - uint_value => <<"123">>, + bool => Translation, + int_value => -123, + uint_value => 123, payload => emqx_utils_json:encode(Payload) }, assert_persisted_data(ClientId, Expected, PersistedData), @@ -728,7 +703,7 @@ t_create_disconnected(Config) -> end), fun(Trace) -> ?assertMatch( - [#{error := greptimedb_client_not_alive, reason := econnrefused}], + [#{error := greptimedb_client_not_alive, reason := _SomeReason}], ?of_kind(greptimedb_connector_start_failed, Trace) ), ok @@ -871,8 +846,8 @@ t_missing_field(Config) -> ok end, fun(Trace) -> - PersistedData0 = query_by_clientid(ClientId0, Config), - PersistedData1 = query_by_clientid(ClientId1, Config), + PersistedData0 = query_by_clientid(ClientId0, ClientId0, Config), + PersistedData1 = query_by_clientid(ClientId1, ClientId1, Config), case IsBatch of true -> ?assertMatch( @@ -893,65 +868,6 @@ t_missing_field(Config) -> ), ok. -t_authentication_error(Config0) -> - GreptimedbType = ?config(greptimedb_type, Config0), - GreptimeConfig0 = proplists:get_value(greptimedb_config, Config0), - GreptimeConfig = - case GreptimedbType of - grpcv1 -> GreptimeConfig0#{<<"password">> => <<"wrong_password">>} - end, - Config = lists:keyreplace(greptimedb_config, 1, Config0, {greptimedb_config, GreptimeConfig}), - ?check_trace( - begin - ?wait_async_action( - create_bridge(Config), - #{?snk_kind := greptimedb_connector_start_failed}, - 10_000 - ) - end, - fun(Trace) -> - ?assertMatch( - [#{error := auth_error} | _], - ?of_kind(greptimedb_connector_start_failed, Trace) - ), - ok - end - ), - ok. - -t_authentication_error_on_get_status(Config0) -> - ResourceId = resource_id(Config0), - - % Fake initialization to simulate credential update after bridge was created. - emqx_common_test_helpers:with_mock( - greptimedb, - check_auth, - fun(_) -> - ok - end, - fun() -> - GreptimedbType = ?config(greptimedb_type, Config0), - GreptimeConfig0 = proplists:get_value(greptimedb_config, Config0), - GreptimeConfig = - case GreptimedbType of - grpcv1 -> GreptimeConfig0#{<<"password">> => <<"wrong_password">>} - end, - Config = lists:keyreplace( - greptimedb_config, 1, Config0, {greptimedb_config, GreptimeConfig} - ), - {ok, _} = create_bridge(Config), - ?retry( - _Sleep = 1_000, - _Attempts = 10, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) - ) - end - ), - - % Now back to wrong credentials - ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)), - ok. 
- t_authentication_error_on_send_message(Config0) -> ResourceId = resource_id(Config0), QueryMode = proplists:get_value(query_mode, Config0, sync), From 49218569503e6502edd6a180a2f170626efd9304 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Thu, 20 Jul 2023 20:10:29 +0800 Subject: [PATCH 57/73] test: make all emqx_bridge_greptimedb_SUITE tests passing --- .../src/emqx_bridge_greptimedb_connector.erl | 4 +- .../test/emqx_bridge_greptimedb_SUITE.erl | 94 +++++++++++-------- 2 files changed, 58 insertions(+), 40 deletions(-) diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index 43455d5d2..655351842 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -81,7 +81,7 @@ on_query(InstId, {send_message, Data}, _State = #{write_syntax := SyntaxLines, c #{batch => false, mode => sync, error => ErrorPoints} ), log_error_points(InstId, ErrorPoints), - ErrorPoints + {error, ErrorPoints} end. %% Once a Batched Data trans to points failed. @@ -463,7 +463,7 @@ parse_timestamp([TsBin]) -> continue_lines_to_points(Data, Item, Rest, ResultPointsAcc, ErrorPointsAcc) -> case line_to_point(Data, Item) of - #{fields := Fields} when map_size(Fields) =:= 0 -> + {_, [#{fields := Fields}]} when map_size(Fields) =:= 0 -> %% greptimedb client doesn't like empty field maps... ErrorPointsAcc1 = [{error, no_fields} | ErrorPointsAcc], lines_to_points(Data, Rest, ResultPointsAcc, ErrorPointsAcc1); diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl index 044d6a2bd..b7fb6451e 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl @@ -9,6 +9,7 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("emqx/include/logger.hrl"). %%------------------------------------------------------------------------------ %% CT boilerplate @@ -284,7 +285,8 @@ send_message(Config, Payload) -> Name = ?config(greptimedb_name, Config), Type = greptimedb_type_bin(?config(greptimedb_type, Config)), BridgeId = emqx_bridge_resource:bridge_id(Type, Name), - emqx_bridge:send_message(BridgeId, Payload). + Resp = emqx_bridge:send_message(BridgeId, Payload), + Resp. 
query_by_clientid(Topic, ClientId, Config) -> GreptimedbHost = ?config(greptimedb_host, Config), @@ -308,7 +310,7 @@ query_by_clientid(Topic, ClientId, Config) -> {"Authorization", "Basic Z3JlcHRpbWVfdXNlcjpncmVwdGltZV9wd2Q="}, {"Content-Type", "application/x-www-form-urlencoded"} ], - Body = <<"sql=select * from ", Topic/binary, " where clientid='", ClientId/binary, "'">>, + Body = <<"sql=select * from \"", Topic/binary, "\" where clientid='", ClientId/binary, "'">>, {ok, 200, _Headers, RawBody0} = ehttpc:request( EHttpcPoolName, @@ -317,29 +319,49 @@ query_by_clientid(Topic, ClientId, Config) -> _Timeout = 10_000, _Retry = 0 ), - #{ - <<"code">> := 0, - <<"output">> := [ - #{ - <<"records">> := #{ - <<"rows">> := Rows, - <<"schema">> := Schema - } - } - ] - } = emqx_utils_json:decode(RawBody0, [return_maps]), - case Schema of - null -> - #{}; - #{<<"column_schemas">> := ColumnsSchemas} -> - Columns = lists:map(fun(#{<<"name">> := Name}) -> Name end, ColumnsSchemas), - index_by_field(Rows, Columns) + case emqx_utils_json:decode(RawBody0, [return_maps]) of + #{ + <<"code">> := 0, + <<"output">> := [ + #{ + <<"records">> := #{ + <<"rows">> := Rows, + <<"schema">> := Schema + } + } + ] + } -> + make_row(Schema, Rows); + #{ + <<"code">> := Code, + <<"error">> := Error + } -> + GreptimedbName = ?config(greptimedb_name, Config), + Type = greptimedb_type_bin(?config(greptimedb_type, Config)), + BridgeId = emqx_bridge_resource:bridge_id(Type, GreptimedbName), + + ?SLOG(error, #{ + msg => io_lib:format("Failed to query: ~p, ~p", [Code, Error]), + connector => BridgeId, + reason => Error + }), + %% TODO(dennis): check the error by code + case binary:match(Error, <<"Table not found">>) of + nomatch -> + {error, Error}; + _ -> + %% Table not found + #{} + end end. -index_by_field([], Columns) -> +make_row(null, _Rows) -> #{}; -index_by_field([Row], Columns) -> +make_row(_Schema, []) -> + #{}; +make_row(#{<<"column_schemas">> := ColumnsSchemas}, [Row]) -> + Columns = lists:map(fun(#{<<"name">> := Name}) -> Name end, ColumnsSchemas), maps:from_list(lists:zip(Columns, Row)). 
assert_persisted_data(ClientId, Expected, PersistedData) -> @@ -784,26 +806,22 @@ t_write_failure(Config) -> emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> case QueryMode of sync -> - {_, {ok, _}} = - ?wait_async_action( - ?assertMatch( - {error, {resource_error, #{reason := timeout}}}, - send_message(Config, SentData) - ), - #{?snk_kind := handle_async_reply, action := nack}, - 1_000 - ) + ?wait_async_action( + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + send_message(Config, SentData) + ), + #{?snk_kind := greptimedb_connector_do_query_failure, action := nack}, + 16_000 + ) end end), - fun(Trace0) -> + fun(Trace) -> case QueryMode of sync -> - Trace = ?of_kind(handle_async_reply, Trace0), - ?assertMatch([_ | _], Trace), - [#{result := Result} | _] = Trace, - ?assert( - not emqx_bridge_greptimedb_connector:is_unrecoverable_error(Result), - #{got => Result} + ?assertMatch( + [#{error := _} | _], + ?of_kind(greptimedb_connector_do_query_failure, Trace) ) end, ok @@ -841,7 +859,7 @@ t_missing_field(Config) -> ?match_n_events(NEvents, #{ ?snk_kind := greptimedb_connector_send_query_error }), - _Timeout1 = 10_000 + _Timeout1 = 16_000 ), ok end, From 6f7fbcf6937790b507ea7bb931dcef33103fd746 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Thu, 20 Jul 2023 20:17:07 +0800 Subject: [PATCH 58/73] fix: compile error --- apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl index 4a0428675..c6765e859 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl @@ -123,7 +123,7 @@ resource_type(pulsar_producer) -> emqx_bridge_pulsar_impl_producer; resource_type(oracle) -> emqx_oracle; resource_type(iotdb) -> emqx_bridge_iotdb_impl; resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector; -resource_type(kinesis_producer) -> emqx_bridge_kinesis_impl_producer. +resource_type(kinesis_producer) -> emqx_bridge_kinesis_impl_producer; resource_type(greptimedb_grpc_v1) -> emqx_bridge_greptimedb_connector. fields(bridges) -> From 3b1363dbb7751008ed9199de14719a5cd552d0b8 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Thu, 20 Jul 2023 20:51:52 +0800 Subject: [PATCH 59/73] chore: change license to BCL and adds emqx_bridge_greptimedb_connector_SUITE --- apps/emqx_bridge_greptimedb/BSL.txt | 94 +++++++++ apps/emqx_bridge_greptimedb/LICENSE | 191 ------------------ apps/emqx_bridge_greptimedb/README.md | 14 +- .../src/emqx_bridge_greptimedb.app.src | 3 +- .../src/emqx_bridge_greptimedb_connector.erl | 10 +- .../test/emqx_bridge_greptimedb_SUITE.erl | 7 +- ...emqx_bridge_greptimedb_connector_SUITE.erl | 152 ++++++++++++++ 7 files changed, 266 insertions(+), 205 deletions(-) create mode 100644 apps/emqx_bridge_greptimedb/BSL.txt delete mode 100644 apps/emqx_bridge_greptimedb/LICENSE create mode 100644 apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl diff --git a/apps/emqx_bridge_greptimedb/BSL.txt b/apps/emqx_bridge_greptimedb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_greptimedb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. 
+Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. 
To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_greptimedb/LICENSE b/apps/emqx_bridge_greptimedb/LICENSE deleted file mode 100644 index 64d3c22a9..000000000 --- a/apps/emqx_bridge_greptimedb/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2023, Dennis Zhuang . 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/apps/emqx_bridge_greptimedb/README.md b/apps/emqx_bridge_greptimedb/README.md index 13f26c348..b92538c66 100644 --- a/apps/emqx_bridge_greptimedb/README.md +++ b/apps/emqx_bridge_greptimedb/README.md @@ -13,7 +13,15 @@ For more information about GreptimeDB, please refer to [official ## Configurations -TODO +Just like the InfluxDB data bridge but have some different parameters. Below are several important parameters: + - `server`: The IPv4 or IPv6 address or the hostname to connect to. + - `dbname`: The GreptimeDB database name. + - `write_syntax`: Like the `write_syntax` in `InfluxDB` conf, it's the conf of InfluxDB line protocol to write data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point, and placeholder supported. -## License -[Apache License 2.0](./LICENSE) + +# Contributing - [Mandatory] +Please see our [contributing.md](../../CONTRIBUTING.md). + +# License + +See [BSL](./BSL.txt). diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src index f0a07bc28..53053c80b 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src @@ -5,10 +5,11 @@ {applications, [ kernel, stdlib, + emqx_resource, + emqx_bridge, greptimedb ]}, {env, []}, {modules, []}, - {licenses, ["Apache-2.0"]}, {links, []} ]}. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index 655351842..a31606bbc 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -129,10 +129,7 @@ fields(common) -> [ {server, server()}, {precision, - %% The greptimedb only supports these 4 precision: - %% See "https://github.com/influxdata/greptimedb/blob/ - %% 6b607288439a991261307518913eb6d4e280e0a7/models/points.go#L487" for - %% more information. 
+ %% The greptimedb only supports these 4 precision mk(enum([ns, us, ms, s]), #{ required => false, default => ms, desc => ?DESC("precision") })} @@ -306,12 +303,13 @@ is_auth_key(_) -> %% Query do_query(InstId, Client, Points) -> case greptimedb:write_batch(Client, Points) of - {ok, _} -> + {ok, #{response := {affected_rows, #{value := Rows}}}} -> ?SLOG(debug, #{ msg => "greptimedb write point success", connector => InstId, points => Points - }); + }), + {ok, {affected_rows, Rows}}; {error, {unauth, _, _}} -> ?tp(greptimedb_connector_do_query_failure, #{error => <<"authorization failure">>}), ?SLOG(error, #{ diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl index b7fb6451e..1a38c882f 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl @@ -87,7 +87,6 @@ init_per_group(GreptimedbType, Config0) when emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), ok = start_apps(), {ok, _} = application:ensure_all_started(emqx_connector), - application:ensure_all_started(greptimedb), emqx_mgmt_api_test_util:init_suite(), Config = [{use_tls, UseTLS} | Config0], {Name, ConfigString, GreptimedbConfig} = greptimedb_config( @@ -416,7 +415,7 @@ t_start_ok(Config) -> begin case QueryMode of sync -> - ?assertMatch(ok, send_message(Config, SentData)) + ?assertMatch({ok, _}, send_message(Config, SentData)) end, PersistedData = query_by_clientid(atom_to_binary(?FUNCTION_NAME), ClientId, Config), Expected = #{ @@ -571,7 +570,7 @@ t_const_timestamp(Config) -> }, case QueryMode of sync -> - ?assertMatch(ok, send_message(Config, SentData)) + ?assertMatch({ok, _}, send_message(Config, SentData)) end, PersistedData = query_by_clientid(<<"mqtt">>, ClientId, Config), Expected = #{foo => 123}, @@ -613,7 +612,7 @@ t_boolean_variants(Config) -> }, case QueryMode of sync -> - ?assertMatch(ok, send_message(Config, SentData)) + ?assertMatch({ok, _}, send_message(Config, SentData)) end, case QueryMode of sync -> ok diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl new file mode 100644 index 000000000..3e576393a --- /dev/null +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl @@ -0,0 +1,152 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_greptimedb_connector_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-define(GREPTIMEDB_RESOURCE_MOD, emqx_bridge_greptimedb_connector). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +groups() -> + []. 
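+
+%% Note on the result shapes asserted below: with do_query/3 now returning the
+%% affected-rows count, a successful synchronous write is expected to come back
+%% roughly as follows (a sketch only; the exact nesting may differ):
+%%
+%%   {ok, {affected_rows, N}} = emqx_resource:query(PoolName, test_query()),
+%%   true = is_integer(N).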
+ +init_per_suite(Config) -> + GreptimedbTCPHost = os:getenv("GREPTIMEDB_GRPCV1_TCP_HOST", "toxiproxy"), + GreptimedbTCPPort = list_to_integer(os:getenv("GREPTIMEDB_GRPCV1_TCP_PORT", "4001")), + Servers = [{GreptimedbTCPHost, GreptimedbTCPPort}], + case emqx_common_test_helpers:is_all_tcp_servers_available(Servers) of + true -> + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), + [ + {greptimedb_tcp_host, GreptimedbTCPHost}, + {greptimedb_tcp_port, GreptimedbTCPPort} + | Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_greptimedb); + _ -> + {skip, no_greptimedb} + end + end. + +end_per_suite(_Config) -> + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector). + +init_per_testcase(_, Config) -> + Config. + +end_per_testcase(_, _Config) -> + ok. + +% %%------------------------------------------------------------------------------ +% %% Testcases +% %%------------------------------------------------------------------------------ + +t_lifecycle(Config) -> + Host = ?config(greptimedb_tcp_host, Config), + Port = ?config(greptimedb_tcp_port, Config), + perform_lifecycle_check( + <<"emqx_bridge_greptimedb_connector_SUITE">>, + greptimedb_config(Host, Port) + ). + +perform_lifecycle_check(PoolName, InitialConfig) -> + {ok, #{config := CheckedConfig}} = + emqx_resource:check_config(?GREPTIMEDB_RESOURCE_MOD, InitialConfig), + % We need to add a write_syntax to the config since the connector + % expects this + FullConfig = CheckedConfig#{write_syntax => greptimedb_write_syntax()}, + {ok, #{ + state := #{client := #{pool := ReturnedPoolName}} = State, + status := InitialStatus + }} = emqx_resource:create_local( + PoolName, + ?CONNECTOR_RESOURCE_GROUP, + ?GREPTIMEDB_RESOURCE_MOD, + FullConfig, + #{} + ), + ?assertEqual(InitialStatus, connected), + % Instance should match the state and status of the just started resource + {ok, ?CONNECTOR_RESOURCE_GROUP, #{ + state := State, + status := InitialStatus + }} = + emqx_resource:get_instance(PoolName), + ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + % % Perform query as further check that the resource is working as expected + ?assertMatch({ok, _}, emqx_resource:query(PoolName, test_query())), + ?assertEqual(ok, emqx_resource:stop(PoolName)), + % Resource will be listed still, but state will be changed and healthcheck will fail + % as the worker no longer exists. + {ok, ?CONNECTOR_RESOURCE_GROUP, #{ + state := State, + status := StoppedStatus + }} = + emqx_resource:get_instance(PoolName), + ?assertEqual(stopped, StoppedStatus), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
+ ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + % Can call stop/1 again on an already stopped instance + ?assertEqual(ok, emqx_resource:stop(PoolName)), + % Make sure it can be restarted and the healthchecks and queries work properly + ?assertEqual(ok, emqx_resource:restart(PoolName)), + % async restart, need to wait resource + timer:sleep(500), + {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = + emqx_resource:get_instance(PoolName), + ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + ?assertMatch({ok, _}, emqx_resource:query(PoolName, test_query())), + % Stop and remove the resource in one go. + ?assertEqual(ok, emqx_resource:remove_local(PoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + % Should not even be able to get the resource data out of ets now unlike just stopping. + ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + +% %%------------------------------------------------------------------------------ +% %% Helpers +% %%------------------------------------------------------------------------------ + +greptimedb_config(Host, Port) -> + Server = list_to_binary(io_lib:format("~s:~b", [Host, Port])), + ResourceConfig = #{ + <<"dbname">> => <<"public">>, + <<"server">> => Server, + <<"username">> => <<"greptime_user">>, + <<"password">> => <<"greptime_pwd">> + }, + #{<<"config">> => ResourceConfig}. + +greptimedb_write_syntax() -> + [ + #{ + measurement => "${topic}", + tags => [{"clientid", "${clientid}"}], + fields => [{"payload", "${payload}"}], + timestamp => undefined + } + ]. + +test_query() -> + {send_message, #{ + <<"clientid">> => <<"something">>, + <<"payload">> => #{bool => true}, + <<"topic">> => <<"connector_test">>, + <<"timestamp">> => 1678220316257 + }}. From 50c10dd91971a2945cf5d6fe5b2a2b9ca6c83f52 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Thu, 20 Jul 2023 23:06:23 +0800 Subject: [PATCH 60/73] chore: update greptimedb-client-erl to v0.1.2 --- apps/emqx_bridge_greptimedb/rebar.config | 2 +- .../test/emqx_bridge_greptimedb_SUITE.erl | 2 ++ .../test/emqx_bridge_greptimedb_connector_SUITE.erl | 5 ++++- mix.exs | 1 + 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/apps/emqx_bridge_greptimedb/rebar.config b/apps/emqx_bridge_greptimedb/rebar.config index 2001a72fc..57d45997f 100644 --- a/apps/emqx_bridge_greptimedb/rebar.config +++ b/apps/emqx_bridge_greptimedb/rebar.config @@ -6,7 +6,7 @@ {emqx_connector, {path, "../../apps/emqx_connector"}}, {emqx_resource, {path, "../../apps/emqx_resource"}}, {emqx_bridge, {path, "../../apps/emqx_bridge"}}, - {greptimedb_client_erl, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {branch, "feature/check-auth"}}} + {greptimedb_client_erl, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.2"}}} ]}. {plugins, [rebar3_path_deps]}. {project_plugins, [erlfmt]}. diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl index 1a38c882f..f563b7e71 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl @@ -49,6 +49,7 @@ end_per_suite(_Config) -> emqx_conf, emqx_bridge, emqx_resource, emqx_rule_engine ]), _ = application:stop(emqx_connector), + _ = application:stop(greptimedb), ok. 
init_per_group(GreptimedbType, Config0) when @@ -87,6 +88,7 @@ init_per_group(GreptimedbType, Config0) when emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), ok = start_apps(), {ok, _} = application:ensure_all_started(emqx_connector), + {ok, _} = application:ensure_all_started(greptimedb), emqx_mgmt_api_test_util:init_suite(), Config = [{use_tls, UseTLS} | Config0], {Name, ConfigString, GreptimedbConfig} = greptimedb_config( diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl index 3e576393a..a4acf5b4e 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_connector_SUITE.erl @@ -28,6 +28,7 @@ init_per_suite(Config) -> ok = emqx_common_test_helpers:start_apps([emqx_conf]), ok = emqx_connector_test_helpers:start_apps([emqx_resource]), {ok, _} = application:ensure_all_started(emqx_connector), + {ok, _} = application:ensure_all_started(greptimedb), [ {greptimedb_tcp_host, GreptimedbTCPHost}, {greptimedb_tcp_port, GreptimedbTCPPort} @@ -45,7 +46,9 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_common_test_helpers:stop_apps([emqx_conf]), ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), - _ = application:stop(emqx_connector). + _ = application:stop(emqx_connector), + _ = application:stop(greptimedb), + ok. init_per_testcase(_, Config) -> Config. diff --git a/mix.exs b/mix.exs index 4d6cf700b..21a238f22 100644 --- a/mix.exs +++ b/mix.exs @@ -209,6 +209,7 @@ defmodule EMQXUmbrella.MixProject do {:crc32cer, "0.1.8", override: true}, {:supervisor3, "1.1.12", override: true}, {:opentsdb, github: "emqx/opentsdb-client-erl", tag: "v0.5.1", override: true}, + {:greptimedb, github: "GreptimeTeam/greptimedb-client-erl", tag: "v0.1.2", override: true}, # The following two are dependencies of rabbit_common. They are needed here to # make mix not complain about conflicting versions {:thoas, github: "emqx/thoas", tag: "v1.0.0", override: true}, From a1c7eb337be73c77030c8c5fef4f6175eeec88a0 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Fri, 21 Jul 2023 10:37:59 +0800 Subject: [PATCH 61/73] fix: dependency name --- apps/emqx_bridge_greptimedb/rebar.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/emqx_bridge_greptimedb/rebar.config b/apps/emqx_bridge_greptimedb/rebar.config index 57d45997f..f0942f910 100644 --- a/apps/emqx_bridge_greptimedb/rebar.config +++ b/apps/emqx_bridge_greptimedb/rebar.config @@ -6,7 +6,7 @@ {emqx_connector, {path, "../../apps/emqx_connector"}}, {emqx_resource, {path, "../../apps/emqx_resource"}}, {emqx_bridge, {path, "../../apps/emqx_bridge"}}, - {greptimedb_client_erl, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.2"}}} + {greptimedb, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.2"}}} ]}. {plugins, [rebar3_path_deps]}. {project_plugins, [erlfmt]}. 
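The rename above lines the rebar dependency key up with the OTP application name
registered by the client library — the same `greptimedb` atom that
`emqx_bridge_greptimedb.app.src` lists under `applications` and that the test
suites start with `application:ensure_all_started(greptimedb)`. A minimal sketch
of the pairing (the `.app.src` excerpt is abridged and illustrative):

{deps, [
    %% rebar.config: the key is the client library's application name
    {greptimedb, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.2"}}}
]}.

%% emqx_bridge_greptimedb.app.src (abridged): the same name in the runtime deps
{applications, [kernel, stdlib, emqx_resource, emqx_bridge, greptimedb]}.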
From ffcd04bc9fbb1d780964ee1fcda0d03f8afcdc2a Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Fri, 21 Jul 2023 10:42:50 +0800 Subject: [PATCH 62/73] docs: add change log --- changes/ee/feat-10647.en.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/ee/feat-10647.en.md diff --git a/changes/ee/feat-10647.en.md b/changes/ee/feat-10647.en.md new file mode 100644 index 000000000..2b341fa4b --- /dev/null +++ b/changes/ee/feat-10647.en.md @@ -0,0 +1 @@ +Add enterprise data bridge for [GreptimeDB](https://github.com/GreptimeTeam/greptimedb). \ No newline at end of file From c9550cc2e5a6b5925327e90d7c2b664aca0ad086 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Fri, 21 Jul 2023 14:33:05 +0800 Subject: [PATCH 63/73] refactor: rename bridge greptimedb_grpc_v1 to greptimedb --- apps/emqx_bridge/src/emqx_bridge.erl | 2 +- .../src/schema/emqx_bridge_enterprise.erl | 6 +++--- .../src/emqx_bridge_greptimedb.erl | 20 +++++++++---------- .../src/emqx_bridge_greptimedb_connector.erl | 10 +++++----- .../test/emqx_bridge_greptimedb_SUITE.erl | 10 +++++----- .../emqx_bridge_greptimedb_connector.hocon | 4 ++-- 6 files changed, 26 insertions(+), 26 deletions(-) diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index b60276910..612481663 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -90,7 +90,7 @@ T == oracle; T == iotdb; T == kinesis_producer; - T == greptimedb_grpc_v1 + T == greptimedb ). -define(ROOT_KEY, bridges). diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl index c6765e859..c23ffb6df 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_enterprise.erl @@ -50,7 +50,7 @@ api_schemas(Method) -> api_ref(emqx_bridge_iotdb, <<"iotdb">>, Method), api_ref(emqx_bridge_rabbitmq, <<"rabbitmq">>, Method), api_ref(emqx_bridge_kinesis, <<"kinesis_producer">>, Method ++ "_producer"), - api_ref(emqx_bridge_greptimedb, <<"greptimedb_grpc_v1">>, Method ++ "_grpc_v1") + api_ref(emqx_bridge_greptimedb, <<"greptimedb">>, Method ++ "_grpc_v1") ]. schema_modules() -> @@ -124,7 +124,7 @@ resource_type(oracle) -> emqx_oracle; resource_type(iotdb) -> emqx_bridge_iotdb_impl; resource_type(rabbitmq) -> emqx_bridge_rabbitmq_connector; resource_type(kinesis_producer) -> emqx_bridge_kinesis_impl_producer; -resource_type(greptimedb_grpc_v1) -> emqx_bridge_greptimedb_connector. +resource_type(greptimedb) -> emqx_bridge_greptimedb_connector. fields(bridges) -> [ @@ -302,7 +302,7 @@ greptimedb_structs() -> } )} || Protocol <- [ - greptimedb_grpc_v1 + greptimedb ] ]. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl index 415544fcf..877e464dd 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.erl @@ -32,16 +32,16 @@ conn_bridge_examples(Method) -> [ #{ - <<"greptimedb_grpc_v1">> => #{ + <<"greptimedb">> => #{ summary => <<"Greptimedb HTTP API V2 Bridge">>, - value => values("greptimedb_grpc_v1", Method) + value => values("greptimedb", Method) } } ]. 
values(Protocol, get) -> values(Protocol, post); -values("greptimedb_grpc_v1", post) -> +values("greptimedb", post) -> SupportUint = <<"uint_value=${payload.uint_key}u,">>, TypeOpts = #{ bucket => <<"example_bucket">>, @@ -49,7 +49,7 @@ values("greptimedb_grpc_v1", post) -> token => <<"example_token">>, server => <<"127.0.0.1:4001">> }, - values(common, "greptimedb_grpc_v1", SupportUint, TypeOpts); + values(common, "greptimedb", SupportUint, TypeOpts); values(Protocol, put) -> values(Protocol, post). @@ -80,13 +80,13 @@ namespace() -> "bridge_greptimedb". roots() -> []. fields("post_grpc_v1") -> - method_fields(post, greptimedb_grpc_v1); + method_fields(post, greptimedb); fields("put_grpc_v1") -> - method_fields(put, greptimedb_grpc_v1); + method_fields(put, greptimedb); fields("get_grpc_v1") -> - method_fields(get, greptimedb_grpc_v1); + method_fields(get, greptimedb); fields(Type) when - Type == greptimedb_grpc_v1 + Type == greptimedb -> greptimedb_bridge_common_fields() ++ connector_fields(Type). @@ -125,8 +125,8 @@ desc("config") -> ?DESC("desc_config"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for Greptimedb using `", string:to_upper(Method), "` method."]; -desc(greptimedb_grpc_v1) -> - ?DESC(emqx_bridge_greptimedb_connector, "greptimedb_grpc_v1"); +desc(greptimedb) -> + ?DESC(emqx_bridge_greptimedb_connector, "greptimedb"); desc(_) -> undefined. diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index a31606bbc..7473f9690 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -119,7 +119,7 @@ roots() -> {config, #{ type => hoconsc:union( [ - hoconsc:ref(?MODULE, greptimedb_grpc_v1) + hoconsc:ref(?MODULE, greptimedb) ] ) }} @@ -134,7 +134,7 @@ fields(common) -> required => false, default => ms, desc => ?DESC("precision") })} ]; -fields(greptimedb_grpc_v1) -> +fields(greptimedb) -> fields(common) ++ [ {dbname, mk(binary(), #{required => true, desc => ?DESC("dbname")})}, @@ -159,8 +159,8 @@ server() -> desc(common) -> ?DESC("common"); -desc(greptimedb_grpc_v1) -> - ?DESC("greptimedb_grpc_v1"). +desc(greptimedb) -> + ?DESC("greptimedb"). %% ------------------------------------------------------------------------------------------------- %% internal functions @@ -613,7 +613,7 @@ desc_test_() -> ), ?_assertMatch( {desc, _, _}, - desc(greptimedb_grpc_v1) + desc(greptimedb) ), ?_assertMatch( {desc, _, _}, diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl index f563b7e71..25bdb6b76 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl @@ -191,7 +191,7 @@ greptimedb_config(grpcv1 = Type, GreptimedbHost, GreptimedbPort, Config) -> WriteSyntax = example_write_syntax(), ConfigString = io_lib:format( - "bridges.greptimedb_grpc_v1.~s {\n" + "bridges.greptimedb.~s {\n" " enable = true\n" " server = \"~p:~b\"\n" " dbname = public\n" @@ -229,7 +229,7 @@ parse_and_check(ConfigString, Type, Name) -> Config. greptimedb_type_bin(grpcv1) -> - <<"greptimedb_grpc_v1">>. + <<"greptimedb">>. create_bridge(Config) -> create_bridge(Config, _Overrides = #{}). 
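%% With the type renamed, the same suite helpers address the bridge under the
%% `greptimedb` config root; a rough sketch of a probe in the style of
%% t_start_ok (the payload keys and values here are illustrative only):
%%
%%   {ok, _} = create_bridge(Config),
%%   SentData = #{
%%       <<"clientid">> => <<"c-greptime">>,
%%       <<"topic">> => <<"t/greptime">>,
%%       <<"payload">> => #{<<"bool">> => true},
%%       <<"timestamp">> => erlang:system_time(millisecond)
%%   },
%%   ?assertMatch({ok, _}, send_message(Config, SentData)).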
@@ -487,7 +487,7 @@ t_start_ok_timestamp_write_syntax(Config) -> GreptimedbConfigString0 = ?config(greptimedb_config_string, Config), GreptimedbTypeCfg = case GreptimedbType of - grpcv1 -> "greptimedb_grpc_v1" + grpcv1 -> "greptimedb" end, WriteSyntax = %% N.B.: this single space characters are relevant @@ -521,7 +521,7 @@ t_start_ok_no_subject_tags_write_syntax(Config) -> GreptimedbConfigString0 = ?config(greptimedb_config_string, Config), GreptimedbTypeCfg = case GreptimedbType of - grpcv1 -> "greptimedb_grpc_v1" + grpcv1 -> "greptimedb" end, WriteSyntax = %% N.B.: this single space characters are relevant @@ -641,7 +641,7 @@ t_bad_timestamp(Config) -> GreptimedbConfigString0 = ?config(greptimedb_config_string, Config), GreptimedbTypeCfg = case GreptimedbType of - grpcv1 -> "greptimedb_grpc_v1" + grpcv1 -> "greptimedb" end, WriteSyntax = %% N.B.: this single space characters are relevant diff --git a/rel/i18n/emqx_bridge_greptimedb_connector.hocon b/rel/i18n/emqx_bridge_greptimedb_connector.hocon index 87370b211..9cb10951f 100644 --- a/rel/i18n/emqx_bridge_greptimedb_connector.hocon +++ b/rel/i18n/emqx_bridge_greptimedb_connector.hocon @@ -6,10 +6,10 @@ dbname.desc: dbname.label: """Database""" -greptimedb_grpc_v1.desc: +greptimedb.desc: """GreptimeDB's protocol. Support GreptimeDB v1.8 and before.""" -greptimedb_grpc_v1.label: +greptimedb.label: """HTTP API Protocol""" password.desc: From b34374c26ff8e7af866a3fc291d2993ee3c863f0 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Fri, 21 Jul 2023 14:59:24 +0800 Subject: [PATCH 64/73] chore: by CR comments --- .tool-versions | 2 +- .../src/emqx_bridge_greptimedb_connector.erl | 20 +++++++++++++------ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/.tool-versions b/.tool-versions index 0dbab2a1d..3a2251dc8 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,2 +1,2 @@ -erlang 25.3.2.3 +erlang 25.3.2-1 elixir 1.14.5-otp-25 diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index 7473f9690..c021e4354 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -32,7 +32,9 @@ ]). %% only for test +-ifdef(TEST). -export([is_unrecoverable_error/1]). +-endif. -type ts_precision() :: ns | us | ms | s. @@ -186,8 +188,8 @@ start_client(InstId, Config) -> msg => "start greptimedb connector error", connector => InstId, error => E, - reason => R, - stack => S + reason => emqx_utils:redact(R), + stack => emqx_utils:redact(S) }), {error, R} end. @@ -342,7 +344,7 @@ to_config(Lines, Precision) -> to_config([], Acc, _Precision) -> lists:reverse(Acc); to_config([Item0 | Rest], Acc, Precision) -> - Ts0 = maps:get(timestamp, Item0, undefined), + Ts0 = maps:get(timestamp, Item0, ?DEFAULT_TIMESTAMP_TMPL), {Ts, FromPrecision, ToPrecision} = preproc_tmpl_timestamp(Ts0, Precision), Item = #{ measurement => emqx_placeholder:preproc_tmpl(maps:get(measurement, Item0)), @@ -374,7 +376,11 @@ preproc_tmpl_timestamp(Ts, Precision) when is_binary(Ts) -> {emqx_placeholder:preproc_tmpl(Ts), Precision, Precision}. to_kv_config(KVfields) -> - maps:fold(fun to_maps_config/3, #{}, proplists:to_map(KVfields)). + lists:foldl( + fun({K, V}, Acc) -> to_maps_config(K, V, Acc) end, + #{}, + KVfields + ). 
to_maps_config(K, V, Res) -> NK = emqx_placeholder:preproc_tmpl(bin(K)), @@ -391,6 +397,8 @@ parse_batch_data(InstId, BatchData, SyntaxLines) -> {[Points | ListOfPoints], ErrAccIn}; {error, ErrorPoints} -> log_error_points(InstId, ErrorPoints), + {ListOfPoints, ErrAccIn + 1}; + _ -> {ListOfPoints, ErrAccIn + 1} end end, @@ -522,8 +530,6 @@ value_type([UInt, <<"u">>]) when is_integer(UInt) -> greptimedb_values:uint64_value(UInt); -value_type([Float]) when is_float(Float) -> - Float; value_type([<<"t">>]) -> greptimedb_values:boolean_value(true); value_type([<<"T">>]) -> @@ -544,6 +550,8 @@ value_type([<<"FALSE">>]) -> greptimedb_values:boolean_value(false); value_type([<<"False">>]) -> greptimedb_values:boolean_value(false); +value_type([Float]) when is_float(Float) -> + Float; value_type(Val) -> #{values => #{string_values => Val}, datatype => 'STRING'}. From ba9dcbcff0471c29cead4f40dc3e03edcafdccd3 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Fri, 21 Jul 2023 15:39:41 +0800 Subject: [PATCH 65/73] chore: style --- changes/ee/feat-10647.en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changes/ee/feat-10647.en.md b/changes/ee/feat-10647.en.md index 2b341fa4b..b42ef1f94 100644 --- a/changes/ee/feat-10647.en.md +++ b/changes/ee/feat-10647.en.md @@ -1 +1 @@ -Add enterprise data bridge for [GreptimeDB](https://github.com/GreptimeTeam/greptimedb). \ No newline at end of file +Add enterprise data bridge for [GreptimeDB](https://github.com/GreptimeTeam/greptimedb). From 2ea903c5aca90f1bfc1166ec95f8305b0a4725aa Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Fri, 21 Jul 2023 17:12:19 +0800 Subject: [PATCH 66/73] fix: static checks failures --- .../src/emqx_bridge_greptimedb_connector.erl | 6 ++---- .../test/emqx_bridge_greptimedb_SUITE.erl | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index c021e4354..666073913 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -271,7 +271,7 @@ protocol_config( } = Config ) -> [ - {dbname, DbName} + {dbname, str(DbName)} ] ++ auth(Config) ++ ssl_config(SSL). @@ -288,7 +288,7 @@ ssl_config(SSL = #{enable := true}) -> auth(#{username := Username, password := Password}) -> [ - {auth, {basic, #{username => Username, password => Password}}} + {auth, {basic, #{username => str(Username), password => str(Password)}}} ]; auth(_) -> []. @@ -397,8 +397,6 @@ parse_batch_data(InstId, BatchData, SyntaxLines) -> {[Points | ListOfPoints], ErrAccIn}; {error, ErrorPoints} -> log_error_points(InstId, ErrorPoints), - {ListOfPoints, ErrAccIn + 1}; - _ -> {ListOfPoints, ErrAccIn + 1} end end, diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl index 25bdb6b76..bcd57f530 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl @@ -49,7 +49,6 @@ end_per_suite(_Config) -> emqx_conf, emqx_bridge, emqx_resource, emqx_rule_engine ]), _ = application:stop(emqx_connector), - _ = application:stop(greptimedb), ok. 
init_per_group(GreptimedbType, Config0) when @@ -145,6 +144,7 @@ end_per_group(Group, Config) when emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), ehttpc_sup:stop_pool(EHttpcPoolName), delete_bridge(Config), + _ = application:stop(greptimedb), ok; end_per_group(_Group, _Config) -> ok. From cd9d5f287ee2de93e76a5ed33dcc2d3633637240 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Fri, 21 Jul 2023 19:16:58 +0800 Subject: [PATCH 67/73] chore: adds auto_reconnect for ecpool --- .../src/emqx_bridge_greptimedb_connector.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index 666073913..4be100594 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -51,6 +51,8 @@ -define(DEFAULT_TIMESTAMP_TMPL, "${timestamp}"). +-define(AUTO_RECONNECT_S, 1). + %% ------------------------------------------------------------------------------------------------- %% resource callback callback_mode() -> always_sync. @@ -261,6 +263,7 @@ client_config( {pool_size, erlang:system_info(schedulers)}, {pool, InstId}, {pool_type, random}, + {auto_reconnect, ?AUTO_RECONNECT_S}, {timeunit, maps:get(precision, Config, ms)} ] ++ protocol_config(Config). From 9f200120c2f2b4955db0e587fdc6b57de43c6625 Mon Sep 17 00:00:00 2001 From: Dennis Zhuang Date: Mon, 24 Jul 2023 12:04:29 +0800 Subject: [PATCH 68/73] feat: use http port to detect server availability --- .../test/emqx_bridge_greptimedb_SUITE.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl index bcd57f530..d4bc5b01e 100644 --- a/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl +++ b/apps/emqx_bridge_greptimedb/test/emqx_bridge_greptimedb_SUITE.erl @@ -80,7 +80,7 @@ init_per_group(GreptimedbType, Config0) when proxy_name => "greptimedb_tls" } end, - case emqx_common_test_helpers:is_tcp_server_available(GreptimedbHost, GreptimedbPort) of + case emqx_common_test_helpers:is_tcp_server_available(GreptimedbHost, GreptimedbHttpPort) of true -> ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), @@ -93,7 +93,7 @@ init_per_group(GreptimedbType, Config0) when {Name, ConfigString, GreptimedbConfig} = greptimedb_config( grpcv1, GreptimedbHost, GreptimedbPort, Config ), - EHttpcPoolNameBin = <<(atom_to_binary(?MODULE))/binary, "_grpcv1">>, + EHttpcPoolNameBin = <<(atom_to_binary(?MODULE))/binary, "_http">>, EHttpcPoolName = binary_to_atom(EHttpcPoolNameBin), {EHttpcTransport, EHttpcTransportOpts} = case UseTLS of From 8439ce0e844655471f9501babbc31c69fb9261d2 Mon Sep 17 00:00:00 2001 From: firest Date: Mon, 24 Jul 2023 18:41:28 +0800 Subject: [PATCH 69/73] chore: update app version && reboot_lists --- apps/emqx/src/emqx.app.src | 2 +- apps/emqx_bridge/src/emqx_bridge.app.src | 2 +- apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src | 1 - apps/emqx_machine/priv/reboot_lists.eterm | 1 + apps/emqx_machine/src/emqx_machine.app.src | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index 47f1ae4b4..5ee4e2688 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -2,7 +2,7 @@ {application, emqx, [ 
{id, "emqx"}, {description, "EMQX Core"}, - {vsn, "5.1.2"}, + {vsn, "5.1.3"}, {modules, []}, {registered, []}, {applications, [ diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src index 11d199c9d..fabf4d334 100644 --- a/apps/emqx_bridge/src/emqx_bridge.app.src +++ b/apps/emqx_bridge/src/emqx_bridge.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge, [ {description, "EMQX bridges"}, - {vsn, "0.1.23"}, + {vsn, "0.1.24"}, {registered, [emqx_bridge_sup]}, {mod, {emqx_bridge_app, []}}, {applications, [ diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src index 53053c80b..c048a0d0c 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src @@ -6,7 +6,6 @@ kernel, stdlib, emqx_resource, - emqx_bridge, greptimedb ]}, {env, []}, diff --git a/apps/emqx_machine/priv/reboot_lists.eterm b/apps/emqx_machine/priv/reboot_lists.eterm index 500a47d8f..92f6b4bbd 100644 --- a/apps/emqx_machine/priv/reboot_lists.eterm +++ b/apps/emqx_machine/priv/reboot_lists.eterm @@ -85,6 +85,7 @@ emqx_bridge_opents, emqx_bridge_clickhouse, emqx_bridge_dynamo, + emqx_bridge_greptimedb, emqx_bridge_hstreamdb, emqx_bridge_influxdb, emqx_bridge_iotdb, diff --git a/apps/emqx_machine/src/emqx_machine.app.src b/apps/emqx_machine/src/emqx_machine.app.src index e81d4b53f..9a9dedc28 100644 --- a/apps/emqx_machine/src/emqx_machine.app.src +++ b/apps/emqx_machine/src/emqx_machine.app.src @@ -3,7 +3,7 @@ {id, "emqx_machine"}, {description, "The EMQX Machine"}, % strict semver, bump manually! - {vsn, "0.2.8"}, + {vsn, "0.2.9"}, {modules, []}, {registered, []}, {applications, [kernel, stdlib, emqx_ctl]}, From 77184c00ffe7e3cfca0ed801c0050fd69dde64a5 Mon Sep 17 00:00:00 2001 From: Serge Tupchii Date: Tue, 25 Jul 2023 19:19:33 +0300 Subject: [PATCH 70/73] chore(ekka): Bump version to 0.15.9 --- apps/emqx/rebar.config | 2 +- changes/ce/fix-11346.en.md | 2 ++ mix.exs | 2 +- rebar.config | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changes/ce/fix-11346.en.md diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index ea362dac8..d24999972 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -28,7 +28,7 @@ {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.8"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.9"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.14"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, diff --git a/changes/ce/fix-11346.en.md b/changes/ce/fix-11346.en.md new file mode 100644 index 000000000..e38ea98d9 --- /dev/null +++ b/changes/ce/fix-11346.en.md @@ -0,0 +1,2 @@ +Update ekka to version 0.15.9. +This fixes dangling etcd locks occurred if acquiring the lock failed with a timeout. 
diff --git a/mix.exs b/mix.exs index 21a238f22..0abd382ab 100644 --- a/mix.exs +++ b/mix.exs @@ -55,7 +55,7 @@ defmodule EMQXUmbrella.MixProject do {:cowboy, github: "emqx/cowboy", tag: "2.9.2", override: true}, {:esockd, github: "emqx/esockd", tag: "5.9.6", override: true}, {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-1", override: true}, - {:ekka, github: "emqx/ekka", tag: "0.15.8", override: true}, + {:ekka, github: "emqx/ekka", tag: "0.15.9", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.8", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.11", override: true}, diff --git a/rebar.config b/rebar.config index b4fb4fb9e..7a8e0bb02 100644 --- a/rebar.config +++ b/rebar.config @@ -62,7 +62,7 @@ , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}} , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}} , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-1"}}} - , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.8"}}} + , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.9"}}} , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.8"}}} , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.11"}}} From 14b76916dfe22f015a7276d495c867592f5e21f9 Mon Sep 17 00:00:00 2001 From: firest Date: Wed, 26 Jul 2023 15:37:58 +0800 Subject: [PATCH 71/73] fix(ds): avoid crashes when starting on Windows --- apps/emqx_durable_storage/src/emqx_ds_app.erl | 10 +++++++++- .../src/emqx_durable_storage.app.src | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/apps/emqx_durable_storage/src/emqx_ds_app.erl b/apps/emqx_durable_storage/src/emqx_ds_app.erl index fb4d487e9..73269d61c 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_app.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_app.erl @@ -13,12 +13,20 @@ start(_Type, _Args) -> emqx_ds_sup:start_link(). init_mnesia() -> + %% FIXME: This is a temporary workaround to avoid crashes when starting on Windows + Storage = + case mria:rocksdb_backend_available() of + true -> + rocksdb_copies; + _ -> + disc_copies + end, ok = mria:create_table( ?SESSION_TAB, [ {rlog_shard, ?DS_SHARD}, {type, set}, - {storage, rocksdb_copies}, + {storage, Storage}, {record_name, session}, {attributes, record_info(fields, session)} ] diff --git a/apps/emqx_durable_storage/src/emqx_durable_storage.app.src b/apps/emqx_durable_storage/src/emqx_durable_storage.app.src index 944477306..ecf9dd270 100644 --- a/apps/emqx_durable_storage/src/emqx_durable_storage.app.src +++ b/apps/emqx_durable_storage/src/emqx_durable_storage.app.src @@ -2,7 +2,7 @@ {application, emqx_durable_storage, [ {description, "Message persistence and subscription replays for EMQX"}, % strict semver, bump manually! 
- {vsn, "0.1.1"}, + {vsn, "0.1.2"}, {modules, []}, {registered, []}, {applications, [kernel, stdlib, rocksdb, gproc, mria]}, From 470426872775a0bc59aca0de79b0e35a556e2626 Mon Sep 17 00:00:00 2001 From: firest Date: Wed, 26 Jul 2023 15:44:39 +0800 Subject: [PATCH 72/73] chore: update changes --- changes/ce/fix-11352.en.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/ce/fix-11352.en.md diff --git a/changes/ce/fix-11352.en.md b/changes/ce/fix-11352.en.md new file mode 100644 index 000000000..537bc2f5e --- /dev/null +++ b/changes/ce/fix-11352.en.md @@ -0,0 +1 @@ +Fixed this [#11345](https://github.com/emqx/emqx/issues/11345) crash issue when starting on Windows or any other platform without RocksDB support. From abf8d8b391de7b800af73cbf4b15cba558c1f6e1 Mon Sep 17 00:00:00 2001 From: firest Date: Wed, 26 Jul 2023 16:35:07 +0800 Subject: [PATCH 73/73] fix(ds): make dialyzer happy --- apps/emqx_durable_storage/src/emqx_ds_app.erl | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/apps/emqx_durable_storage/src/emqx_ds_app.erl b/apps/emqx_durable_storage/src/emqx_ds_app.erl index 73269d61c..216e979ee 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_app.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_app.erl @@ -4,6 +4,8 @@ -module(emqx_ds_app). +-dialyzer({nowarn_function, storage/0}). + -export([start/2]). -include("emqx_ds_int.hrl"). @@ -14,20 +16,21 @@ start(_Type, _Args) -> init_mnesia() -> %% FIXME: This is a temporary workaround to avoid crashes when starting on Windows - Storage = - case mria:rocksdb_backend_available() of - true -> - rocksdb_copies; - _ -> - disc_copies - end, ok = mria:create_table( ?SESSION_TAB, [ {rlog_shard, ?DS_SHARD}, {type, set}, - {storage, Storage}, + {storage, storage()}, {record_name, session}, {attributes, record_info(fields, session)} ] ). + +storage() -> + case mria:rocksdb_backend_available() of + true -> + rocksdb_copies; + _ -> + disc_copies + end.
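The added `nowarn_function` is presumably needed because, depending on whether the
RocksDB backend is compiled in, dialyzer derives a constant success typing for
`mria:rocksdb_backend_available/0` and would flag one clause of `storage/0` as
unreachable; the run-time check itself is what keeps start-up working on hosts
without RocksDB. A rough sketch of how the fallback can be observed on such a host
(illustrative only; `?SESSION_TAB` is the table created in `init_mnesia/0`):

%% on a node where the RocksDB backend is unavailable (e.g. Windows):
false = mria:rocksdb_backend_available(),
disc_copies = storage(),
%% and, once emqx_ds_app has created the table, it stays on plain disc copies:
disc_copies = mnesia:table_info(?SESSION_TAB, storage_type).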