Memory drops immediately when Erlang does a fullsweep GC, but for a short time it first climbs back up to the original peak and only then goes down.
When I cast gc to TestPid and stop the memory-creating loop for 3 minutes or more, memory again goes up to the original peak and then goes down after I restart the loop.
How does it work?
This is my simple test code.
ts.erl
-module(ts).
-behaviour(gen_server).

%% API
-export([start/0,
         stop/0,
         gc/0,
         loop_cnt/1]).

%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

-define(SERVER, ?MODULE).
-define(BASE_CNT, 10000).
-define(ONE_LOOP_CNT, 200).

-record(state, {loop_cnt = 0, one_loop_cnt = 0}).

start() ->
    gen_server:start({local, ?SERVER}, ?MODULE, [], []).

stop() ->
    gen_server:cast(?SERVER, stop).

gc() ->
    gen_server:cast(?SERVER, gc).

loop_cnt(Cnt) when is_number(Cnt) ->
    gen_server:cast(?SERVER, {loop_cnt, Cnt}).

init([]) ->
    io:format("start mem~n"),
    erlang:send_after(1, self(), start_add_mem),
    {ok, #state{one_loop_cnt = ?ONE_LOOP_CNT}, 0}.

%% Note: never replies, so callers would time out; unused in this test.
handle_call(_Req, _From, State) ->
    {noreply, State}.

%% Note: start_add_mem is sent with erlang:send_after/3, so it arrives
%% as an info message; this clause is dead code and never matches.
handle_cast(start_add_mem, State) ->
    io:format("cast start_add_mem~n"),
    {noreply, State};
handle_cast(gc, State) ->
    io:format("garbage_info beforegc ~w~n",
              [erlang:process_info(self(), garbage_collection)]),
    {Us, _} = timer:tc(erlang, garbage_collect, [self()]),
    io:format("do_gc cost:~wms~n", [Us / 1000]),
    {noreply, State};
handle_cast({loop_cnt, Cnt}, State) ->
    io:format("set loop_cnt ~w~n", [Cnt]),
    {noreply, State#state{one_loop_cnt = Cnt}};
handle_cast(stop, State) ->
    {stop, normal, State}.

handle_info(start_add_mem, #state{loop_cnt = Cnt, one_loop_cnt = OneLoopCnt} = State) ->
    erlang:send_after(1000, self(), start_add_mem),
    StartCnt = Cnt rem 100,
    case Cnt rem 30 =:= 0 of
        true ->
            io:format("garbage_info ~w~n",
                      [erlang:process_info(self(), garbage_collection)]);
        false ->
            ok
    end,
    do_add_mem(StartCnt, OneLoopCnt),
    try
        {_, L} = erlang:process_info(self(), garbage_collection),
        case lists:keyfind(minor_gcs, 1, L) of
            false ->
                io:format("gc find minor_gcs error ~w~n",
                          [erlang:process_info(self(), garbage_collection)]);
            {_, GcCount} ->
                case GcCount =:= 0 of
                    true ->
                        io:format("maybe_trigger gc ~w~n",
                                  [erlang:process_info(self(), garbage_collection)]);
                    _ ->
                        ok
                end
        end
    catch _A:_B ->
        io:format("gc print error ~w ~w~n",
                  [erlang:process_info(self(), garbage_collection), {_A, _B}]),
        ok
    end,
    {noreply, State#state{loop_cnt = Cnt + 1}};
handle_info(_Req, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%%%===================================================================
%%% Internal functions
%%%===================================================================
do_add_mem(StartCnt, OneLoopCnt) ->
    lists:foreach(
        fun(I) ->
            Dict = dict:new(),
            NewDict = lists:foldl(
                fun(J, AccDict) ->
                    dict:store(J, I, AccDict)
                end, Dict, lists:seq(1, OneLoopCnt)),
            L = lists:seq(1, OneLoopCnt),
            %% randomly keep or drop the built data in the process dictionary
            case random:uniform() > 0.5 of
                true ->
                    erlang:put({tm, I}, {NewDict, L});
                false ->
                    erlang:put({tm, I}, undefined)
            end
        end, lists:seq(StartCnt * ?BASE_CNT, StartCnt * ?BASE_CNT + ?BASE_CNT)),
    ok.
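I start and poke it from the shell roughly like this (the pid in the output is just a placeholder):

1> c(ts).
{ok,ts}
2> ts:start().
{ok,<0.87.0>}
3> ts:gc().
ok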
I think the peak is the GC allocating a new heap while running a major GC, and the second valley appears when the previous heap is freed.
Taking into account that each process executes its GC at a different time, and that you usually don't have processes with a 5 GB heap, you won't (or shouldn't) notice it in a production system.
The documentation about Erlang's generational GC is really detailed; I think it deserves a careful read.
Also, you can use tracing to get a message whenever a GC starts or ends:
erlang:trace(Pid, true, [monotonic_timestamp, garbage_collection]), or poll process_info's total_heap_size and heap_size.
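For example, a tracer loop could look like this minimal sketch (the helper names are mine; the exact trace tags depend on the OTP release, older ones sending gc_start/gc_end and newer ones gc_minor_start/gc_minor_end and gc_major_start/gc_major_end):

watch_gc(Pid) ->
    %% the calling process becomes the tracer and receives the messages
    erlang:trace(Pid, true, [monotonic_timestamp, garbage_collection]),
    watch_gc_loop().

watch_gc_loop() ->
    receive
        {trace_ts, Pid, Event, Info, Ts} ->
            %% Info is a property list with heap_size, old_heap_size, etc.
            io:format("~p ~p at ~p:~n  ~p~n", [Pid, Event, Ts, Info]),
            watch_gc_loop()
    after 30000 ->
        ok    % give up after 30 s of silence
    end.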
Suppose I have a large collection of integers (say 50,000,000 of them).
I would like to write a function that returns me the largest integer in the collection that doesn't exceed a value passed as a parameter to the function. E.g. if the values were:
Values = [ 10, 20, 30, 40, 50, 60]
then find(Values, 25) should return 20.
The function will be called many times a second and the collection is large. Assuming that the performance of a brute-force search is too slow, what would be an efficient way to do it? The integers would rarely change, so they can be stored in a data structure that would give the fastest access.
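For reference, the brute-force version I am trying to beat is roughly this quick sketch (a linear scan over an unsorted list, returning none when every value exceeds the limit):

%% Largest element =< Max, or none.
find(Values, Max) ->
    lists:foldl(
        fun(V, none) when V =< Max -> V;
           (V, Best) when V =< Max, V > Best -> V;
           (_V, Best) -> Best
        end, none, Values).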
I've looked at gb_trees but I don't think you can obtain the "insertion point" and then get the previous entry.
I realise I could do this from scratch by building my own tree structure, or binary chopping a sorted array, but is there some built-in way to do it that I've overlooked?
To find the nearest value in a large unsorted list, I'd suggest a divide-and-conquer strategy: process different parts of the list in parallel, while parts that are small enough are processed sequentially.
Here is the code:
-module( finder ).
-export( [ nearest/2 ] ).

-define( THRESHOLD, 1000 ).

%%
%% sequential finding of the nearest value
%%
%% if no nearest value exists, return null
%%
nearest( Val, List ) when length(List) =< ?THRESHOLD ->
    lists:foldl(
        fun
            ( X, null ) when X < Val ->
                X;
            ( _X, null ) ->
                null;
            ( X, Nearest ) when X < Val, X > Nearest ->
                X;
            ( _X, Nearest ) ->
                Nearest
        end,
        null,
        List );
%%
%% split large lists and process each part in parallel
%%
nearest( Val, List ) ->
    { Left, Right } = lists:split( length(List) div 2, List ),
    Ref1 = spawn_nearest( Val, Left ),
    Ref2 = spawn_nearest( Val, Right ),
    Nearest1 = receive_nearest( Ref1 ),
    Nearest2 = receive_nearest( Ref2 ),
    %%
    %% compare the nearest values from each part
    %%
    case { Nearest1, Nearest2 } of
        { null, null } ->
            null;
        { null, Nearest2 } ->
            Nearest2;
        { Nearest1, null } ->
            Nearest1;
        { Nearest1, Nearest2 } when Nearest2 > Nearest1 ->
            Nearest2;
        { Nearest1, Nearest2 } when Nearest2 =< Nearest1 ->
            Nearest1
    end.

spawn_nearest( Val, List ) ->
    Ref = make_ref(),
    SelfPid = self(),
    spawn(
        fun() ->
            SelfPid ! { Ref, nearest( Val, List ) }
        end ),
    Ref.

receive_nearest( Ref ) ->
    receive
        { Ref, Nearest } -> Nearest
    end.
Testing in shell:
1> c(finder).
{ok,finder}
2>
2> List = [ random:uniform(1000) || _X <- lists:seq(1,100000) ].
[444,724,946,502,312,598,916,667,478,597,143,210,698,160,
559,215,458,422,6,563,476,401,310,59,579,990,331,184,203|...]
3>
3> finder:nearest( 500, List ).
499
4>
4> finder:nearest( -100, lists:seq(1,100000) ).
null
5>
5> finder:nearest( 40000, lists:seq(1,100000) ).
39999
6>
6> finder:nearest( 4000000, lists:seq(1,100000) ).
100000
Performance: (single node)
7>
7> timer:tc( finder, nearest, [ 40000, lists:seq(1,10000) ] ).
{3434,10000}
8>
8> timer:tc( finder, nearest, [ 40000, lists:seq(1,100000) ] ).
{21736,39999}
9>
9> timer:tc( finder, nearest, [ 40000, lists:seq(1,1000000) ] ).
{314399,39999}
Versus plain iterating:
1>
1> timer:tc( lists, foldl, [ fun(_X, Acc) -> Acc end, null, lists:seq(1,10000) ] ).
{14994,null}
2>
2> timer:tc( lists, foldl, [ fun(_X, Acc) -> Acc end, null, lists:seq(1,100000) ] ).
{141951,null}
3>
3> timer:tc( lists, foldl, [ fun(_X, Acc) -> Acc end, null, lists:seq(1,1000000) ] ).
{1374426,null}
So you can see that on a list of 1,000,000 elements, finder:nearest is faster than plainly iterating through the list with lists:foldl.
You may need to tune THRESHOLD to find its optimal value for your case.
You can also improve performance by spawning the processes on different nodes.
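For instance, a remote variant of spawn_nearest might look like this sketch (my addition, assuming the nodes are already connected and the finder module is loaded on Node; note that the list is copied to the remote node, which is costly for big lists):

spawn_nearest( Val, List, Node ) ->
    Ref = make_ref(),
    SelfPid = self(),
    %% run the search on Node and send the result back to us
    spawn( Node,
           fun() ->
               SelfPid ! { Ref, finder:nearest( Val, List ) }
           end ),
    Ref.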
Here is another code sample that uses ets. With an ordered_set table, a lookup with ets:prev/2 takes time logarithmic in the table size, which in practice is close to constant:
1> ets:new(tab,[named_table, ordered_set, public]).
2> lists:foreach(fun(N) -> ets:insert(tab,{N,[]}) end, lists:seq(1,50000000)).
3> timer:tc(fun() -> ets:prev(tab, 500000) end).
{21,499999}
4> timer:tc(fun() -> ets:prev(tab, 41230000) end).
{26,41229999}
The surrounding code would amount to a bit more than this, of course, but it is rather neat.
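For instance, wrapping the lookup in a function could look like this sketch (find_floor is my name for it; it treats an exact match as a hit, per the question's "doesn't exceed" semantics):

%% Largest key in Tab that is =< Target, or none if every key exceeds Target.
%% Assumes Tab is an ordered_set keyed by the integers themselves.
find_floor(Tab, Target) ->
    case ets:member(Tab, Target) of
        true -> {ok, Target};                    % exact match counts
        false ->
            case ets:prev(Tab, Target) of
                '$end_of_table' -> none;         % nothing below Target
                Key -> {ok, Key}
            end
    end.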
So if the input isn't sorted, you can get a linear version by doing:
closest(Target, [Hd | Tl]) ->
    closest(Target, Tl, Hd).

closest(_Target, [], Best) -> Best;
closest(Target, [Target | _], _) -> Target;
closest(Target, [N | Rest], Best) ->
    CurEps = erlang:abs(Target - Best),
    NewEps = erlang:abs(Target - N),
    if
        NewEps < CurEps ->
            closest(Target, Rest, N);
        true ->
            closest(Target, Rest, Best)
    end.
You should be able to do better if the input is sorted.
Note that I invented my own metric for "closest" here, since I allow the closest value to be higher than the target value; you could change it to "closest but not greater than" if you liked.
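For the sorted case, here is a sketch of what "better" could look like (my own helper, not part of the answer above): binary-search a sorted tuple for the largest element that does not exceed the target, matching the question's semantics.

%% Largest element of the sorted tuple that is =< Target, or none.
floor_search(Tuple, Target) ->
    floor_search(Tuple, Target, 1, tuple_size(Tuple), none).

floor_search(_Tuple, _Target, Lo, Hi, Best) when Lo > Hi ->
    Best;
floor_search(Tuple, Target, Lo, Hi, Best) ->
    Mid = (Lo + Hi) div 2,
    case element(Mid, Tuple) of
        V when V =< Target ->
            %% V qualifies; look for a bigger candidate on the right
            floor_search(Tuple, Target, Mid + 1, Hi, V);
        _ ->
            %% too big; discard the right half
            floor_search(Tuple, Target, Lo, Mid - 1, Best)
    end.

E.g. floor_search(list_to_tuple([10, 20, 30, 40, 50, 60]), 25) returns 20, using O(log n) element/3 accesses.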
In my opinion, if you have a huge collection of data that does not change often, you should think about organizing it.
I have written a simple structure based on an ordered list, including insertion and deletion functions. It gives good results for both inserting and searching.
-module(finder).
-export([test/1, find/2, insert/2, remove/2, new/0]).
-compile(export_all).

new() -> [].

insert(V, L) ->
    {R, P} = locate(V, L, undefined, -1),
    insert(V, R, P, L).

find(V, L) ->
    locate(V, L, undefined, -1).

remove(V, L) ->
    {R, P} = locate(V, L, undefined, -1),
    remove(V, R, P, L).

test(Max) ->
    {A, B, C} = erlang:now(),
    random:seed(A, B, C),
    L = lists:seq(0, 100 * Max, 100),
    S = random:uniform(100000000),
    I = random:uniform(100000000),
    io:format("start insert at ~p~n", [erlang:now()]),
    L1 = insert(I, L),
    io:format("start find at ~p~n", [erlang:now()]),
    R = find(S, L1),
    io:format("end at ~p~n result is ~p~n", [erlang:now(), R]).

remove(_, _, -1, L) -> L;
remove(V, V, P, L) ->
    {L1, [V | L2]} = lists:split(P, L),
    L1 ++ L2;
remove(_, _, _, L) -> L.

insert(V, V, _, L) -> L;
insert(V, _, -1, L) -> [V | L];
insert(V, _, P, L) ->
    {L1, L2} = lists:split(P + 1, L),
    L1 ++ [V] ++ L2.

locate(_, [], R, P) -> {R, P};
locate(V, L, R, P) ->
    %% io:format("locate, value = ~p, liste = ~p, current result = ~p, current pos = ~p~n", [V, L, R, P]),
    Le1 = length(L) div 2,
    {L1, [M | L2]} = lists:split(Le1, L),
    locate(V, R, P, Le1 + 1, L1, M, L2).

locate(V, _, P, Le, _, V, _) -> {V, P + Le};
locate(V, _, P, Le, _, M, L2) when V > M -> locate(V, L2, M, P + Le);
locate(V, R, P, _, L1, _, _) -> locate(V, L1, R, P).
which gives the following results:
(exec#WXFRB1824L)6> finder:test(10000000).
start insert at {1347,28177,618000}
start find at {1347,28178,322000}
end at {1347,28178,728000}
result is {72983500,729836}
That is 704 ms to insert a new value into a list of 10,000,000 elements, and 406 ms to find the nearest value in the same list.
I tried to get more accurate information about the performance of the algorithm I proposed above and, reading the very interesting solution of Stemm, I decided to use the timer:tc/3 function. Big disappointment :o). On my laptop I got very poor timing accuracy, so I decided to leave my Core i5 (2 cores * 2 threads) + 2 GB DDR3 + Windows XP 32-bit and use my home PC: Phantom (6 cores) + 8 GB + Linux 64-bit.
Now timer:tc works as expected, and I am able to manipulate lists of 100,000,000 integers. I saw that I was losing a lot of time calling the length function at each step, so I refactored the code a little to avoid it:
-module(finder).
-export([test/2, find/2, insert/2, remove/2, new/0]).

%% interface
new() -> {0, []}.

insert(V, {S, L}) ->
    {R, P} = locate(V, L, S, undefined, -1),
    insert(V, R, P, L, S).

find(V, {S, L}) ->
    locate(V, L, S, undefined, -1).

remove(V, {S, L}) ->
    {R, P} = locate(V, L, S, undefined, -1),
    remove(V, R, P, L, S).

remove(_, _, -1, L, S) -> {S, L};
remove(V, V, P, L, S) ->
    {L1, [V | L2]} = lists:split(P, L),
    {S - 1, L1 ++ L2};
remove(_, _, _, L, S) -> {S, L}.

%% local
insert(V, V, _, L, S) -> {S, L};
insert(V, _, -1, L, S) -> {S + 1, [V | L]};
insert(V, _, P, L, S) ->
    {L1, L2} = lists:split(P + 1, L),
    {S + 1, L1 ++ [V] ++ L2}.

locate(_, [], _, R, P) -> {R, P};
locate(V, L, S, R, P) ->
    S1 = S div 2,
    S2 = S - S1 - 1,
    {L1, [M | L2]} = lists:split(S1, L),
    locate(V, R, P, S1 + 1, L1, S1, M, L2, S2).

locate(V, _, P, Le, _, _, V, _, _) -> {V, P + Le};
locate(V, _, P, Le, _, _, M, L2, S2) when V > M -> locate(V, L2, S2, M, P + Le);
locate(V, R, P, _, L1, S1, _, _, _) -> locate(V, L1, S1, R, P).

%% test
test(Max, Iter) ->
    {A, B, C} = erlang:now(),
    random:seed(A, B, C),
    L = {Max + 1, lists:seq(0, 100 * Max, 100)},
    Ins = test_insert(L, Iter, []),
    io:format("insert:~n~s~n", [stat(Ins, Iter)]),
    Fin = test_find(L, Iter, []),
    io:format("find:~n ~s~n", [stat(Fin, Iter)]).

test_insert(_L, 0, Res) -> Res;
test_insert(L, I, Res) ->
    V = random:uniform(1000000000),
    {T, _} = timer:tc(finder, insert, [V, L]),
    test_insert(L, I - 1, [T | Res]).

test_find(_L, 0, Res) -> Res;
test_find(L, I, Res) ->
    V = random:uniform(1000000000),
    {T, _} = timer:tc(finder, find, [V, L]),
    test_find(L, I - 1, [T | Res]).

stat(L, N) ->
    Aver = lists:sum(L) / N,
    {Min, Max, Var} = lists:foldl(
        fun(X, {Mi, Ma, Va}) ->
            {min(X, Mi), max(X, Ma), Va + (X - Aver) * (X - Aver)}
        end,
        {999999999999999999999999999, 0, 0},
        L),
    Sig = math:sqrt(Var / N),
    io_lib:format("    average: ~p,~n    minimum: ~p,~n    maximum: ~p,~n    sigma  : ~p.~n",
                  [Aver, Min, Max, Sig]).
Here are some results.
1> finder:test(1000,10).
insert:
average: 266.7,
minimum: 216,
maximum: 324,
sigma : 36.98121144581393.
find:
average: 136.1,
minimum: 105,
maximum: 162,
sigma : 15.378231367748375.
ok
2> finder:test(100000,10).
insert:
average: 10096.5,
minimum: 9541,
maximum: 12222,
sigma : 762.5642595873478.
find:
average: 5077.4,
minimum: 4666,
maximum: 6937,
sigma : 627.126494417195.
ok
3> finder:test(1000000,10).
insert:
average: 109871.1,
minimum: 94747,
maximum: 139916,
sigma : 13852.211285206417.
find:
average: 40428.0,
minimum: 31297,
maximum: 56965,
sigma : 7797.425562325042.
ok
4> finder:test(100000000,10).
insert:
average: 8067547.8,
minimum: 6265625,
maximum: 16590349,
sigma : 3199868.809140206.
find:
average: 8484876.4,
minimum: 5158504,
maximum: 15950944,
sigma : 4044848.707872872.
ok
On the 100,000,000-element list it is slow, and the multi-process solution cannot help with this dichotomy algorithm... That is a weak point of this solution, but if you have several processes requesting a nearest value in parallel, it will still be able to use the multiple cores.
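For illustration, such concurrent requests could be issued like this sketch (my code, not part of the module above; note that Store is copied into each spawned process, so for a really big list you would rather keep the data somewhere shared, such as an ets table):

%% Run one find/2 per target in its own process and collect the results
%% in request order.
parallel_find(Store, Targets) ->
    Parent = self(),
    Refs = [begin
                Ref = make_ref(),
                spawn(fun() -> Parent ! {Ref, finder:find(T, Store)} end),
                Ref
            end || T <- Targets],
    [receive {Ref, Res} -> Res end || Ref <- Refs].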
Pascal.