How to analyze a long snapshot duration - Hazelcast

The DAG of the pipeline:
digraph DAG {
"rocketmqSource(device_send_topic,tf-api-source-qa)" [localParallelism=2];
"flat-map" [localParallelism=2];
"mapUsingIMap" [localParallelism=1];
"flat-map-2" [localParallelism=1];
"map-stateful-global" [localParallelism=1];
"mapUsingIMap-2" [localParallelism=4];
"flat-map-3" [localParallelism=1];
"kafkaSink" [localParallelism=2];
"rocketmqSource(device_send_topic,tf-api-source-qa)" -> "flat-map" [queueSize=1024];
"flat-map" -> "mapUsingIMap" [queueSize=1024];
"mapUsingIMap" -> "flat-map-2" [queueSize=1024];
"flat-map-2" -> "map-stateful-global" [label="distributed-partitioned", queueSize=1024];
"map-stateful-global" -> "mapUsingIMap-2" [queueSize=1024];
"mapUsingIMap-2" -> "flat-map-3" [queueSize=1024];
"flat-map-3" -> "kafkaSink" [queueSize=1024];
}
Sometimes the snapshot takes very long, a minute or more, which reduces the rate at which data is processed. Is there any way to analyze the problem?

Related

Memory peaks again shortly after an Erlang GC (fullsweep)?

Memory drops immediately when Erlang runs a fullsweep GC, but for a short time it climbs back to the original peak and then goes down again.
When I stop the memory-creating loop and cast gc to the test process, then wait 3 minutes or more, memory still goes up to the original peak and then goes down once I restart the loop.
How does this work?
This is my simple test code.
ts.erl
-module(ts).
-behaviour(gen_server).

%% API
-export([
    start/0,
    stop/0,
    gc/0, loop_cnt/1
]).

%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

-define(SERVER, ?MODULE).
-define(BASE_CNT, 10000).
-define(ONE_LOOP_CNT, 200).

-record(state, {loop_cnt = 0, one_loop_cnt = 0}).

start() ->
    gen_server:start({local, ?SERVER}, ?MODULE, [], []).

stop() ->
    gen_server:cast(?SERVER, stop).

gc() ->
    gen_server:cast(?SERVER, gc).

loop_cnt(Cnt) when is_number(Cnt) ->
    gen_server:cast(?SERVER, {loop_cnt, Cnt}).

init([]) ->
    io:format("start mem ~n"),
    erlang:send_after(1, self(), start_add_mem),
    {ok, #state{one_loop_cnt = ?ONE_LOOP_CNT}, 0}.

handle_call(_Req, _From, State) ->
    {noreply, State}.

handle_cast(start_add_mem, State) ->
    io:format("cast start_add_mem~n"),
    {noreply, State};
handle_cast(gc, State) ->
    io:format("garbage_info beforegc ~w ~n", [erlang:process_info(self(), garbage_collection)]),
    {Us, _} = timer:tc(erlang, garbage_collect, [self()]),
    io:format("do_gc cost:~wms ~n", [Us / 1000]),
    {noreply, State};
handle_cast({loop_cnt, Cnt}, State) ->
    io:format("set loop_cnt ~w ~n", [Cnt]),
    {noreply, State#state{one_loop_cnt = Cnt}};
handle_cast(stop, State) ->
    {stop, normal, State}.

handle_info(start_add_mem, #state{loop_cnt = Cnt, one_loop_cnt = OneLoopCnt} = State) ->
    erlang:send_after(1000, self(), start_add_mem),
    StartCnt = Cnt rem 100,
    case Cnt rem 30 =:= 0 of
        true ->
            io:format("garbage_info ~w ~n", [erlang:process_info(self(), garbage_collection)]);
        false ->
            ok
    end,
    do_add_mem(StartCnt, OneLoopCnt),
    try
        {_, L} = erlang:process_info(self(), garbage_collection),
        case lists:keyfind(minor_gcs, 1, L) of
            false ->
                io:format("gc find minor_gcs error ~w ~n", [erlang:process_info(self(), garbage_collection)]);
            {_, GcCount} ->
                case GcCount =:= 0 of
                    true ->
                        io:format("maybe_trigger gc ~w ~n", [erlang:process_info(self(), garbage_collection)]);
                    _ ->
                        ok
                end
        end
    catch _A:_B ->
        io:format("gc print error ~w ~w ~n", [erlang:process_info(self(), garbage_collection), {_A, _B}]),
        ok
    end,
    {noreply, State#state{loop_cnt = Cnt + 1}};
handle_info(_Req, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%%%===================================================================
%%% Internal functions
%%%===================================================================
do_add_mem(StartCnt, OneLoopCnt) ->
    lists:foreach(
        fun(I) ->
            Dict = dict:new(),
            NewDict = lists:foldl(
                fun(J, AccDict) ->
                    dict:store(J, I, AccDict)
                end, Dict, lists:seq(1, OneLoopCnt)),
            L = lists:seq(1, OneLoopCnt),
            case random:uniform() > 0.5 of
                true ->
                    erlang:put({tm, I}, {NewDict, L});
                false ->
                    erlang:put({tm, I}, undefined)
            end
        end, lists:seq(StartCnt * ?BASE_CNT, StartCnt * ?BASE_CNT + ?BASE_CNT)),
    ok.
I think that peak is the GC creating a new heap while running a major GC (for a moment both the old and the new heap are allocated); the second valley is when the previous heap is freed.
Taking into account that each process executes its GC at a different time, and that you usually don't have processes with a 5GB heap, you won't (or shouldn't) notice it in a production system.
The documentation about Erlang's generational GC is really detailed; I think it deserves a careful read.
Also, you can use tracing to get messages when a GC starts or ends:
erlang:trace(Pid, true, [monotonic_timestamp, garbage_collection]), or use process_info's total_heap_size and heap_size.
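For example, a minimal sketch of a tracer loop, assuming the gen_server above is registered as ts (on recent OTP releases the trace tags are gc_minor_start/gc_minor_end/gc_major_start/gc_major_end; older releases use gc_start/gc_end):

trace_gc() ->
    Pid = whereis(ts),
    erlang:trace(Pid, true, [monotonic_timestamp, garbage_collection]),
    gc_loop().

gc_loop() ->
    receive
        %% Info is a list of heap statistics, Ts the monotonic timestamp
        {trace_ts, Pid, Tag, Info, Ts} ->
            io:format("~p ~p at ~p: ~p~n", [Pid, Tag, Ts, Info]),
            gc_loop()
    end.

This lets you correlate the moments at which heap_size grows and shrinks with the start and end of each minor and major collection.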

Modeling sequence of events in Alloy

The following model represents a sequence of actions given a certain pre-defined order.
open util/ordering[Time]

abstract sig Action {pre: set Action}
one sig A, B, C, D extends Action {}

fact {
  pre = A -> B + D -> B + D -> C
}

sig Time { queue: Action -> lone State }
abstract sig State {}
one sig Acted, Ok, Nok extends State {}

pred Queue [t, t': Time] {
  some a: Action - (t.queue).State |
    a.pre in (t.queue).Ok + (t.queue).Nok and t'.queue = t.queue + (a -> Acted)
}

pred Reply [t, t': Time] {
  some a: (t.queue).Acted |
    some s: State - Acted | t'.queue = t.queue ++ (a -> s)
}

fact {
  no first.queue
  last.queue = Action -> Ok or last.queue = Action -> Nok
  all t: Time - last | Queue[t, t.next] or Reply[t, t.next]
}

run {last.queue = Action -> Ok and some t: Time - last | t.queue = Action -> Nok} for 9
With the run I would like to get a sequence where the queue ends with every action Ok but some actions failed before. However, I don't get any instance.
Can someone explain to me what I am doing wrong?
Regards, Andre.
The problem comes from the fact that once an action has, at a given time, the state Nok, it can never be changed back to another state at a later time (as implied by the quantifiers in the Queue and Reply predicates: Queue only picks actions that have no state yet, and Reply only rewrites Acted).
The analyzer thus can't find an instance where, in the final Time, all the actions are in the Ok state and where, at some earlier time, all the actions are in the Nok state (which is what your run command requests).
Hope it helps

nodejs readers/writers concurrency

Here's some simple code that demonstrates what I'm trying to do:
myVar = 1

reader = () ->
  getDataFromServer1().then ->
    # uses myVar and does stuff according to its value
    # returns promise

writer = () ->
  getDataFromServer2().then ->
    # assigns to myVar
    # returns promise

Q.all([reader(), reader(), reader(), writer(), writer(), writer()]).done ->
  console.log 'done'
So I have multiple asynchronous operations running at the same time. Some of them change the value of myVar and some read the value and rely on it. I don't want a writer to write while another writer is writing or a reader is reading; readers can read at the same time, though. This is similar to the Readers–writers problem.
I tried to solve this by defining a sharedResource function as follows:
sharedResource = (initialValue) ->
  readLock = Q.fcall ->
  writeLock = Q.fcall ->

  value: initialValue

  read: (action) ->
    newPromise = writeLock.then action
    readLock = Q.all([newPromise, readLock]).then -> null
    newPromise

  write: (action) ->
    newPromise = Q.all([readLock, writeLock]).then action
    writeLock = Q.all([newPromise, writeLock]).then -> null
    newPromise
and then changed my code to use it:
myVar = sharedResource 1

reader = () ->
  myVar.read ->
    # no one is writing to myVar while doing this request:
    getDataFromServer1().then (data) ->
      # read myVar.value instead of myVar, e.g.
      expect(data == myVar.value)

writer = () ->
  myVar.write ->
    # no one reads or writes myVar while doing this request:
    getDataFromServer2().then (data) ->
      # change myVar.value instead of myVar, e.g.
      myVar.value = data

Q.all([reader(), reader(), reader(), writer(), writer(), writer()]).done ->
  console.log 'done'
This worked perfectly while I had only one sharedResource. Here's where the problem occurs:
myVar1 = sharedResource 1
myVar2 = sharedResource 2

action1 = () ->
  myVar1.read ->
    myVar2.write ->
      getDataFromServer1().then (data) ->
        myVar2.value = data + myVar1.value

action2 = () ->
  myVar2.read ->
    myVar1.write ->
      getDataFromServer2().then (data) ->
        myVar1.value = data + myVar2.value

Q.all([action1(), action1(), action1(), action2(), action2(), action2()]).done ->
  console.log 'done'
Here a deadlock occurs: each promise waits for the other one to resolve, so none of them ever resolves and the program stops.
Edit
I'll try my best to explain:
This is actually code to test my server, to see how it performs when multiple clients send multiple requests at the same time. Say, for example, that every time action1 sends a request, the server increments a value stored in its database. On the client side (the code you are seeing) I also increment a variable that contains the value I expect to be on the server. Then, when action2 sends a request, the server responds with that value, and I assert that the value in the response is the same as my local variable.
So I have to take the lock before I send a request, to make sure action2 doesn't ask for the variable while it is being changed.
I hope that helps.

How to enforce the left-to-right node ordering in GraphViz rank layout?

I am visualizing a collection of processes with GraphViz. Each process consists of some Read or Write operations in program order. Naturally, it is desirable to arrange the operations of each process in left-to-right order.
Using GraphViz (version 2.28), my code goes like this:
digraph G
{
ranksep = 1.0; size = "10,10";
{
node [shape = plaintext, fontsize = 20];
0 -> 1 -> 2 -> 3 -> 4;
}
node [shape = box];
{rank = same;0;wy1;rf1;rc1;rz1;ry1;ra1;rb1;rx2;}
{rank = same;1;wf1;}
{rank = same;2;wx2;wc1;}
{rank = same;3;wf2;wz2;wx3;wa1;}
{rank = same;4;wz1;wy2;wx5;wb1;}
wy1 -> rf1;
rf1 -> rc1;
rc1 -> rz1;
rz1 -> ry1;
ry1 -> ra1;
ra1 -> rb1;
rb1 -> rx2;
wx2 -> wc1;
wf2 -> wz2;
wz2 -> wx3;
wx3 -> wa1;
wz1 -> wy2;
wy2 -> wx5;
wx5 -> wb1;
wf1 -> rf1[color = blue];
wc1 -> rc1[color = blue];
wz1 -> rz1[color = blue];
wy1 -> ry1[color = blue];
wa1 -> ra1[color = blue];
wb1 -> rb1[color = blue];
wx2 -> rx2[color = blue];
// W'WR Order:
wx3 -> wx2[style = dashed, color = red];
// W'WR Order:
wx5 -> wx2[style = dashed, color = red];
}
I am sorry to say that I am not allowed to post the output picture, because my reputation is too low. If you run the code, you will see that the result is not satisfying, due to the out-of-order nodes in the process with pid = 3. Specifically, GraphViz's layout algorithm has rearranged the (ideal) order "wf2 -> wz2 -> wx3 -> wa1" into "wx3, wf2, wz2, wa1". Therefore, my problem is:
My problem: how to enforce left-to-right node ordering within the same rank?
Exploring this site, I have found some similar problems and potential solutions. However, they just did not work for my specific example:
Graphviz .dot node ordering: the constraint = false option made my PDF picture worse. I checked the dot User's Manual, which says:
During rank assignment, the head node of an edge is constrained to be on a higher rank than the tail node. If the edge has constraint=false, however, this requirement is not enforced.
Based on the above statement, (I guess) the constraint = false option takes effect between different ranks rather than within the same rank.
Graphviz---random node order and edges going through labels: to my surprise, the constraint = false option helped the "finite state machine" a lot within the same rank. Again, though, it did not get me out of trouble.
graphviz: circular layout while preserving node order: the process graph is dynamic in both the number of nodes and the number of edges, so it may not be attractive to use absolute positions for the nodes (wouldn't that cause many edge crossings?).
Thanks for any suggestions. Executable code would be appreciated very much.
digraph G
{
ranksep = 1.0; size = "10,10";
{
node [shape = plaintext, fontsize = 20];
0 -> 1 -> 2 -> 3 -> 4;
}
node [shape = box];
{
rank = same;
0->wy1->rf1->rc1->rz1->ry1->ra1->rb1->rx2 [color=white];
rankdir=LR;
}
{
rank = same;
1->wf1[color=white];
rankdir=LR
}
{
rank = same;
2->wx2->wc1[color=white];
rankdir=LR;
}
{
rank = same;
3->wf2->wz2->wx3->wa1[color=white];
rankdir=LR;
}
{
rank = same;
4->wz1->wy2->wx5->wb1[color=white];
rankdir=LR;
}
wy1 -> rf1;
rf1 -> rc1;
rc1 -> rz1;
rz1 -> ry1;
ry1 -> ra1;
ra1 -> rb1;
rb1 -> rx2;
wx2 -> wc1;
wf2 -> wz2;
wz2 -> wx3;
wx3 -> wa1;
wz1 -> wy2;
wy2 -> wx5;
wx5 -> wb1;
wf1 -> rf1[color = blue];
wc1 -> rc1[color = blue];
wz1 -> rz1[color = blue];
wy1 -> ry1[color = blue];
wa1 -> ra1[color = blue];
wb1 -> rb1[color = blue];
wx2 -> rx2[color = blue];
// W'WR Order:
wx3 -> wx2[style = dashed, color = red];
// W'WR Order:
wx5 -> wx2[style = dashed, color = red];
}
I am not quite sure that I've correctly understood your problem, but try this and please comment if it is what you want. I've added invisible edges for the correct in-rank ordering of nodes, and used rankdir for a left-to-right layout.

How to find "nearest" value in a large list in Erlang

Suppose I have a large collection of integers (say 50,000,000 of them).
I would like to write a function that returns the largest integer in the collection that doesn't exceed a value passed as a parameter to the function. E.g. if the values were:
Values = [ 10, 20, 30, 40, 50, 60]
then find(Values, 25) should return 20.
The function will be called many times a second and the collection is large. Assuming that a brute-force search would be too slow, what would be an efficient way to do it? The integers rarely change, so they can be stored in whatever data structure gives the fastest access.
I've looked at gb_trees but I don't think you can obtain the "insertion point" and then get the previous entry.
I realise I could do this from scratch by building my own tree structure, or binary chopping a sorted array, but is there some built-in way to do it that I've overlooked?
To find the nearest value in a large unsorted list, I'd suggest you use a divide-and-conquer strategy and process different parts of the list in parallel; sufficiently small parts of the list can be processed sequentially.
Here is the code:
-module( finder ).
-export( [ nearest/2 ] ).

-define( THRESHOLD, 1000 ).

%%
%% sequential finding of nearest value
%%
%% if nearest value doesn't exist - return null
%%
nearest( Val, List ) when length(List) =< ?THRESHOLD ->
    lists:foldl(
        fun
            ( X, null ) when X < Val ->
                X;
            ( _X, null ) ->
                null;
            ( X, Nearest ) when X < Val, X > Nearest ->
                X;
            ( _X, Nearest ) ->
                Nearest
        end,
        null,
        List );
%%
%% split large lists and process each part in parallel
%%
nearest( Val, List ) ->
    { Left, Right } = lists:split( length(List) div 2, List ),
    Ref1 = spawn_nearest( Val, Left ),
    Ref2 = spawn_nearest( Val, Right ),
    Nearest1 = receive_nearest( Ref1 ),
    Nearest2 = receive_nearest( Ref2 ),
    %%
    %% compare nearest values from each part
    %%
    case { Nearest1, Nearest2 } of
        { null, null } ->
            null;
        { null, Nearest2 } ->
            Nearest2;
        { Nearest1, null } ->
            Nearest1;
        { Nearest1, Nearest2 } when Nearest2 > Nearest1 ->
            Nearest2;
        { Nearest1, Nearest2 } when Nearest2 =< Nearest1 ->
            Nearest1
    end.

spawn_nearest( Val, List ) ->
    Ref = make_ref(),
    SelfPid = self(),
    spawn(
        fun() ->
            SelfPid ! { Ref, nearest( Val, List ) }
        end ),
    Ref.

receive_nearest( Ref ) ->
    receive
        { Ref, Nearest } -> Nearest
    end.
Testing in shell:
1> c(finder).
{ok,finder}
2>
2> List = [ random:uniform(1000) || _X <- lists:seq(1,100000) ].
[444,724,946,502,312,598,916,667,478,597,143,210,698,160,
559,215,458,422,6,563,476,401,310,59,579,990,331,184,203|...]
3>
3> finder:nearest( 500, List ).
499
4>
4> finder:nearest( -100, lists:seq(1,100000) ).
null
5>
5> finder:nearest( 40000, lists:seq(1,100000) ).
39999
6>
6> finder:nearest( 4000000, lists:seq(1,100000) ).
100000
Performance: (single node)
7>
7> timer:tc( finder, nearest, [ 40000, lists:seq(1,10000) ] ).
{3434,10000}
8>
8> timer:tc( finder, nearest, [ 40000, lists:seq(1,100000) ] ).
{21736,39999}
9>
9> timer:tc( finder, nearest, [ 40000, lists:seq(1,1000000) ] ).
{314399,39999}
Versus plain iterating:
1>
1> timer:tc( lists, foldl, [ fun(_X, Acc) -> Acc end, null, lists:seq(1,10000) ] ).
{14994,null}
2>
2> timer:tc( lists, foldl, [ fun(_X, Acc) -> Acc end, null, lists:seq(1,100000) ] ).
{141951,null}
3>
3> timer:tc( lists, foldl, [ fun(_X, Acc) -> Acc end, null, lists:seq(1,1000000) ] ).
{1374426,null}
So, as you may see, on a list with 1,000,000 elements the function finder:nearest is faster than plainly iterating through the list with lists:foldl.
You may find the optimal value of THRESHOLD for your case.
You may also improve performance by spawning the processes on different nodes, as in the sketch below.
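A minimal sketch of that idea, assuming the finder module is loaded on every node and the nodes are already connected; spawn/2 runs the fun on the given node:

spawn_nearest( Val, List, Node ) ->
    Ref = make_ref(),
    SelfPid = self(),
    spawn( Node,
        fun() ->
            SelfPid ! { Ref, nearest( Val, List ) }
        end ),
    Ref.

%% e.g. put one half on the local node and one half on a remote node:
%% Ref1 = spawn_nearest( Val, Left, node() ),
%% Ref2 = spawn_nearest( Val, Right, hd( nodes() ) ).

Note that the list itself is copied to the remote node in the spawn, so this only pays off when the per-element work outweighs the copying cost.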
Here is another code sample that uses ets with an ordered_set table; a lookup with ets:prev is logarithmic in the table size, which in practice is close to constant time:
1> ets:new(tab,[named_table, ordered_set, public]).
2> lists:foreach(fun(N) -> ets:insert(tab,{N,[]}) end, lists:seq(1,50000000)).
3> timer:tc(fun() -> ets:prev(tab, 500000) end).
{21,499999}
4> timer:tc(fun() -> ets:prev(tab, 41230000) end).
{26,41229999}
The surrounding code would be a bit more than this, of course, but it is rather neat.
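A minimal sketch of such a wrapper (find_floor is a name invented here; since ets:prev/2 returns the largest key strictly smaller than its argument, an exact match has to be checked first):

find_floor(Tab, Val) ->
    case ets:member(Tab, Val) of
        true -> Val;
        false ->
            case ets:prev(Tab, Val) of
                '$end_of_table' -> not_found;
                Key -> Key
            end
    end.

%% e.g. with the question's values 10..60 inserted as keys,
%% find_floor(tab, 25) returns 20.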
So if the input isn't sorted, you can get a linear version by doing:
closest(Target, [Hd | Tl]) ->
    closest(Target, Tl, Hd).

closest(_Target, [], Best) -> Best;
closest(Target, [Target | _], _) -> Target;
closest(Target, [N | Rest], Best) ->
    CurEps = erlang:abs(Target - Best),
    NewEps = erlang:abs(Target - N),
    if
        NewEps < CurEps -> closest(Target, Rest, N);
        true -> closest(Target, Rest, Best)
    end.
You should be able to do better if the input is sorted.
I invented my own metric for "closest" here, as I allow the closest value to be higher than the target value - you could change it to "closest but not greater than" if you liked; a sketch of that sorted variant follows.
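As a minimal sketch of that sorted variant (closest_sorted and bsearch are names invented here), binary-searching a pre-built tuple for the largest element not exceeding the target:

closest_sorted(Target, Tuple) ->
    bsearch(Target, Tuple, 1, tuple_size(Tuple), not_found).

bsearch(_Target, _Tuple, Lo, Hi, Best) when Lo > Hi ->
    Best;
bsearch(Target, Tuple, Lo, Hi, Best) ->
    Mid = (Lo + Hi) div 2,
    V = element(Mid, Tuple),
    if
        V =< Target -> bsearch(Target, Tuple, Mid + 1, Hi, V);   % keep as candidate, go right
        true        -> bsearch(Target, Tuple, Lo, Mid - 1, Best) % too large, go left
    end.

%% closest_sorted(25, list_to_tuple([10, 20, 30, 40, 50, 60])) returns 20.

That gives O(log n) lookups, at the cost of rebuilding the tuple when the values change.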
In my opinion, if you have a huge collection of data that does not change often, you should think about organizing it.
I have written a simple structure based on an ordered list, including insertion and deletion functions. It gives good results for both inserting and searching.
-module(finder).
-export([test/1, find/2, insert/2, remove/2, new/0]).
-compile(export_all).

new() -> [].

insert(V, L) ->
    {R, P} = locate(V, L, undefined, -1),
    insert(V, R, P, L).

find(V, L) ->
    locate(V, L, undefined, -1).

remove(V, L) ->
    {R, P} = locate(V, L, undefined, -1),
    remove(V, R, P, L).

test(Max) ->
    {A, B, C} = erlang:now(),
    random:seed(A, B, C),
    L = lists:seq(0, 100 * Max, 100),
    S = random:uniform(100000000),
    I = random:uniform(100000000),
    io:format("start insert at ~p~n", [erlang:now()]),
    L1 = insert(I, L),
    io:format("start find at ~p~n", [erlang:now()]),
    R = find(S, L1),
    io:format("end at ~p~n result is ~p~n", [erlang:now(), R]).

remove(_, _, -1, L) -> L;
remove(V, V, P, L) ->
    {L1, [V | L2]} = lists:split(P, L),
    L1 ++ L2;
remove(_, _, _, L) -> L.

insert(V, V, _, L) -> L;
insert(V, _, -1, L) -> [V | L];
insert(V, _, P, L) ->
    {L1, L2} = lists:split(P + 1, L),
    L1 ++ [V] ++ L2.

locate(_, [], R, P) -> {R, P};
locate(V, L, R, P) ->
    %% io:format("locate, value = ~p, liste = ~p, current result = ~p, current pos = ~p~n", [V, L, R, P]),
    {L1, [M | L2]} = lists:split(Le1 = (length(L) div 2), L),
    locate(V, R, P, Le1 + 1, L1, M, L2).

locate(V, _, P, Le, _, V, _) -> {V, P + Le};
locate(V, _, P, Le, _, M, L2) when V > M -> locate(V, L2, M, P + Le);
locate(V, R, P, _, L1, _, _) -> locate(V, L1, R, P).
which gives the following results:
(exec#WXFRB1824L)6> finder:test(10000000).
start insert at {1347,28177,618000}
start find at {1347,28178,322000}
end at {1347,28178,728000}
result is {72983500,729836}
that is, 704 ms to insert a new value into a list of 10,000,000 elements and 406 ms to find the nearest value in the same list.
I tried to get more accurate information about the performance of the algorithm I proposed above, and after reading the very interesting solution from Stemm, I decided to use the timer:tc/3 function. Big disappointment :o). On my laptop I got very poor timing accuracy, so I decided to leave my Core i5 (2 cores * 2 threads) + 2GB DDR3 + Windows XP 32-bit and use my home PC: Phantom (6 cores) + 8GB + Linux 64-bit.
Now timer:tc works as expected, and I am able to manipulate lists of 100,000,000 integers. I noticed that I was losing a lot of time calling the length function at each step, so I refactored the code a little to avoid it:
-module(finder).
-export([test/2, find/2, insert/2, remove/2, new/0]).

%% interface
new() -> {0, []}.

insert(V, {S, L}) ->
    {R, P} = locate(V, L, S, undefined, -1),
    insert(V, R, P, L, S).

find(V, {S, L}) ->
    locate(V, L, S, undefined, -1).

remove(V, {S, L}) ->
    {R, P} = locate(V, L, S, undefined, -1),
    remove(V, R, P, L, S).

remove(_, _, -1, L, S) -> {S, L};
remove(V, V, P, L, S) ->
    {L1, [V | L2]} = lists:split(P, L),
    {S - 1, L1 ++ L2};
remove(_, _, _, L, S) -> {S, L}.

%% local
insert(V, V, _, L, S) -> {S, L};
insert(V, _, -1, L, S) -> {S + 1, [V | L]};
insert(V, _, P, L, S) ->
    {L1, L2} = lists:split(P + 1, L),
    {S + 1, L1 ++ [V] ++ L2}.

locate(_, [], _, R, P) -> {R, P};
locate(V, L, S, R, P) ->
    S1 = S div 2,
    S2 = S - S1 - 1,
    {L1, [M | L2]} = lists:split(S1, L),
    locate(V, R, P, S1 + 1, L1, S1, M, L2, S2).

locate(V, _, P, Le, _, _, V, _, _) -> {V, P + Le};
locate(V, _, P, Le, _, _, M, L2, S2) when V > M -> locate(V, L2, S2, M, P + Le);
locate(V, R, P, _, L1, S1, _, _, _) -> locate(V, L1, S1, R, P).

%% test
test(Max, Iter) ->
    {A, B, C} = erlang:now(),
    random:seed(A, B, C),
    L = {Max + 1, lists:seq(0, 100 * Max, 100)},
    Ins = test_insert(L, Iter, []),
    io:format("insert:~n~s~n", [stat(Ins, Iter)]),
    Fin = test_find(L, Iter, []),
    io:format("find:~n ~s~n", [stat(Fin, Iter)]).

test_insert(_L, 0, Res) -> Res;
test_insert(L, I, Res) ->
    V = random:uniform(1000000000),
    {T, _} = timer:tc(finder, insert, [V, L]),
    test_insert(L, I - 1, [T | Res]).

test_find(_L, 0, Res) -> Res;
test_find(L, I, Res) ->
    V = random:uniform(1000000000),
    {T, _} = timer:tc(finder, find, [V, L]),
    test_find(L, I - 1, [T | Res]).

stat(L, N) ->
    Aver = lists:sum(L) / N,
    {Min, Max, Var} = lists:foldl(
        fun(X, {Mi, Ma, Va}) -> {min(X, Mi), max(X, Ma), Va + (X - Aver) * (X - Aver)} end,
        {999999999999999999999999999, 0, 0}, L),
    Sig = math:sqrt(Var / N),
    io_lib:format(" average: ~p,~n minimum: ~p,~n maximum: ~p,~n sigma : ~p.~n", [Aver, Min, Max, Sig]).
Here are some results.
1> finder:test(1000,10).
insert:
average: 266.7,
minimum: 216,
maximum: 324,
sigma : 36.98121144581393.
find:
average: 136.1,
minimum: 105,
maximum: 162,
sigma : 15.378231367748375.
ok
2> finder:test(100000,10).
insert:
average: 10096.5,
minimum: 9541,
maximum: 12222,
sigma : 762.5642595873478.
find:
average: 5077.4,
minimum: 4666,
maximum: 6937,
sigma : 627.126494417195.
ok
3> finder:test(1000000,10).
insert:
average: 109871.1,
minimum: 94747,
maximum: 139916,
sigma : 13852.211285206417.
find:
average: 40428.0,
minimum: 31297,
maximum: 56965,
sigma : 7797.425562325042.
ok
4> finder:test(100000000,10).
insert:
average: 8067547.8,
minimum: 6265625,
maximum: 16590349,
sigma : 3199868.809140206.
find:
average: 8484876.4,
minimum: 5158504,
maximum: 15950944,
sigma : 4044848.707872872.
ok
On the 100,000,000-element list it is slow, and the multi-process solution cannot help with this dichotomic algorithm... That is a weak point of this solution, but if several processes request the nearest value in parallel, it will still be able to use the multiple cores.
Pascal.
