Hazelcast Client - stuck thread - multithreading

I'm running into a hung-thread issue using Hazelcast 3.5.1.
My applications run for a while and then silently stop working.
It appears that multiple threads in the HZ client are stuck.
Client Trace
State:TIMED_WAITING
Priority:5
java.lang.Object.wait(Native Method)
com.hazelcast.client.spi.impl.ClientInvocationFuture.get(ClientInvocationFuture.java:104)
com.hazelcast.client.spi.impl.ClientInvocationFuture.get(ClientInvocationFuture.java:89)
com.hazelcast.client.spi.ClientProxy.invoke(ClientProxy.java:130)
com.hazelcast.client.proxy.ClientMapProxy.get(ClientMapProxy.java:197)
Server Error
[ERROR] [2015-07-29 18:20:12,812] [hz._hzInstance_1_dev.partition-operation.thread-0] [][c.h.m.i.o.GetOperation] [[198.47.158.82]:5900 [dev] [3.5.1] io.protostuff.UninitializedMessageException]
com.hazelcast.nio.serialization.HazelcastSerializationException: io.protostuff.UninitializedMessageException
at com.hazelcast.nio.serialization.SerializationServiceImpl.handleException(SerializationServiceImpl.java:380) ~[hazelcast-3.5.1.jar:3.5.1]
at com.hazelcast.nio.serialization.SerializationServiceImpl.toData(SerializationServiceImpl.java:235) ~[hazelcast-3.5.1.jar:3.5.1]
at com.hazelcast.map.impl.record.DataRecordFactory.newRecord(DataRecordFactory.java:47) ~[hazelcast-3.5.1.jar:3.5.1]
Client Config
public ClientConfig config() {
    final ClientConfig config = new ClientConfig();
    config.setExecutorPoolSize(100);
    setupLoggingConfig(config);
    setupNetworkConfig(config);
    setupGroupConfig(config);
    setupSerializationConfig(config);
    setupAdvancedConfig(config);
    return config;
}

private void setupAdvancedConfig(final ClientConfig config) {
    config.setProperty(GroupProperties.PROP_OPERATION_CALL_TIMEOUT_MILLIS, String.valueOf(5000));
}

private void setupLoggingConfig(final ClientConfig config) {
    config.setProperty("hazelcast.logging.type", "slf4j");
}

private void setupNetworkConfig(final ClientConfig config) {
    final ClientNetworkConfig networkConfig = config.getNetworkConfig();
    networkConfig.setConnectionTimeout(1000);
    networkConfig.setConnectionAttemptPeriod(3000);
    networkConfig.setConnectionAttemptLimit(2);
    networkConfig.setRedoOperation(true);
    networkConfig.setSmartRouting(true);
    setupNetworkSocketConfig(networkConfig);
}

private void setupNetworkSocketConfig(final ClientNetworkConfig networkConfig) {
    final SocketOptions socketOptions = networkConfig.getSocketOptions();
    socketOptions.setKeepAlive(false);
    socketOptions.setBufferSize(32);
    socketOptions.setLingerSeconds(3);
    socketOptions.setReuseAddress(false);
    socketOptions.setTcpNoDelay(false);
}
Server Config
private void init(final Config config) {
    setupExecutorConfig(config);
    setupLoggingConfig(config);
    setupMapConfigs(config);
    setupNetworkConfig(config);
    setupGroupConfig(config);
    setupAdvancedConfig(config);
    setupSerializationConfig(config);
}

private void setupAdvancedConfig(final Config config) {
    config.setProperty(GroupProperties.PROP_OPERATION_CALL_TIMEOUT_MILLIS, String.valueOf(5000));
}

private void setupExecutorConfig(final Config config) {
    final ExecutorConfig executorConfig = new ExecutorConfig();
    executorConfig.setPoolSize(300);
    config.addExecutorConfig(executorConfig);
}

private void setupLoggingConfig(final Config config) {
    config.setProperty("hazelcast.logging.type", "slf4j");
}

private void setupNetworkConfig(final Config config) {
    final NetworkConfig networkCfg = config.getNetworkConfig();
    networkCfg.setPort(5900);
    networkCfg.setPortAutoIncrement(false);
    final JoinConfig join = networkCfg.getJoin();
    join.getMulticastConfig().setEnabled(false);
    for (final String server : getServers()) {
        join.getTcpIpConfig().addMember(server);
    }
}

private String[] getServers() {
    return PROPS.getProperty("store.servers").split(",");
}

private void setupMapConfigs(final Config config) {
    setupMapConfigXXX(config);
}

private void setupMapConfigXXX(final Config config) {
    final MapConfig mapConfig = setupMapConfigByName(config, XXX.class.getName());
    setupMapStoreConfigDummy(mapConfig);
    setupEvictionPolicy(mapConfig);
}

private void setupMapStoreConfigDummy(final MapConfig mapConfig) {
    final MapStoreConfig mapStoreConfig = new MapStoreConfig();
    mapStoreConfig.setClassName(DummyStore.class.getName()).setEnabled(true);
    mapConfig.setMapStoreConfig(mapStoreConfig);
}

private void setupEvictionPolicy(final MapConfig mapConfig) {
    mapConfig.setEvictionPolicy(EvictionPolicy.LFU);
    mapConfig.setMaxSizeConfig(oneGBSize());
}

private MapConfig setupMapConfigByName(final Config config, final String mapName) {
    final MapConfig mapConfig = new MapConfig();
    mapConfig.setName(mapName);
    mapConfig.setBackupCount(1);
    final NearCacheConfig nearCacheConfig = new NearCacheConfig();
    nearCacheConfig.setMaxSize(1000).setMaxIdleSeconds(300).setTimeToLiveSeconds(300);
    mapConfig.setNearCacheConfig(nearCacheConfig);
    config.addMapConfig(mapConfig);
    return mapConfig;
}

private MaxSizeConfig oneGBSize() {
    final MaxSizeConfig config = new MaxSizeConfig();
    config.setMaxSizePolicy(MaxSizePolicy.USED_HEAP_SIZE);
    config.setSize(1024);
    return config;
}
I would expect the client to timeout but that doesn't appear to be happening.

I believe you should configure the client-side timeout via the ClientProperties.PROP_INVOCATION_TIMEOUT_SECONDS property.
However, that is just a band-aid; you should find the real root cause of why your serialization is failing.
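A minimal sketch of setting it on the client config, assuming the 3.5.x client, where that constant should resolve to the property string used below:
// Sketch (assumption: Hazelcast 3.5.x client): set the client invocation timeout
// so stuck invocations fail with an exception instead of appearing to hang.
final ClientConfig config = new ClientConfig();
config.setProperty("hazelcast.client.invocation.timeout.seconds", "120");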

Related

MapConfig in Hazelcast Jet

I was using the configuration below with Hazelcast IMDG.
Now I want to use the same configuration with Jet as well.
@Bean
public static Config config() {
    System.err.println("config class");
    Config config = new Config();
    config.setInstanceName("hazelcast");
    MapConfig mapCfg = new MapConfig();
    mapCfg.setName("t1");
    mapCfg.setBackupCount(2);
    mapCfg.setTimeToLiveSeconds(300);
    MapStoreConfig mapStoreCfg = new MapStoreConfig();
    mapStoreCfg.setClassName(PersonMapStore.class.getName()).setEnabled(true);
    mapCfg.setMapStoreConfig(mapStoreCfg);
    config.addMapConfig(mapCfg);
    return config;
}
How do I set the MapConfig for Hazelcast Jet?
JetInstance jet = Jet.newJetInstance(null);
IMap<String, Person> map2 = jet.getMap("t1");
The Jet.newJetInstance(JetConfig) method takes a JetConfig object, which has a setHazelcastConfig method for setting the IMDG config:
@Bean
public static Config config() {
    ...
}

@Bean
public static JetConfig jetConfig() {
    JetConfig jetConfig = new JetConfig();
    jetConfig.setHazelcastConfig(config());
    ...
    return jetConfig;
}
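For completeness, a small usage sketch under the question's own assumptions (a "t1" map storing Person values):
// Start Jet with the combined config; the embedded IMDG member picks up the "t1" MapConfig.
JetInstance jet = Jet.newJetInstance(jetConfig());
IMap<String, Person> map = jet.getMap("t1");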

Hazelcast cluster not starting

I'm not able to see members being added to the cluster when I start instances on multiple ports. Below is just the basic configuration; each instance seems to form its own single-member cluster on its own port.
@SpringBootApplication
@Configuration
public class HazelcastApplication {

    public static void main(String[] args) {
        SpringApplication.run(HazelcastApplication.class, args);
    }

    @Bean(destroyMethod = "shutdown")
    public HazelcastInstance createStorageNode() throws Exception {
        return Hazelcast.newHazelcastInstance();
    }
}
Members [1] {
    Member [169.254.137.152]:5702 this
}

Members [1] {
    Member [169.254.137.152]:5701 this
}
It's possible that you have multiple network interfaces on the machine you're running on while using multicast. Modify your method above to:
@Bean(destroyMethod = "shutdown")
public HazelcastInstance createStorageNode() throws Exception {
    Config config = new Config();
    JoinConfig joinConfig = config.getNetworkConfig().getJoin();
    joinConfig.getMulticastConfig().setEnabled(false);
    joinConfig.getTcpIpConfig().setEnabled(true)
            .getMembers()
            .add("127.0.0.1");
            //.add("169.254.137.152"); // or this
    return Hazelcast.newHazelcastInstance(config);
}
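If you would rather keep multicast, a hypothetical alternative (not part of the original answer) is to restrict Hazelcast to the one interface the members share:
// Hypothetical sketch: keep multicast but pin Hazelcast to a single interface.
Config config = new Config();
config.getNetworkConfig().getInterfaces()
        .setEnabled(true)
        .addInterface("169.254.137.*"); // subnet taken from the member output above
return Hazelcast.newHazelcastInstance(config);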

Using Hazelcast, how can i create event or catch if member is shutdown and print message?

This is my source. How can I print a message if one of the members is shut down for some reason? I think I can use an event or some kind of listener, but how?
import com.hazelcast.core.*;
import com.hazelcast.config.*;
import java.util.Map;

/**
 * @author alvel
 */
public class ShutDown {

    public static void main(String[] args) {
        Config cfg = new Config();
        HazelcastInstance memberOne = Hazelcast.newHazelcastInstance(cfg);
        HazelcastInstance memberTwo = Hazelcast.newHazelcastInstance(cfg);
        Map<Integer, String> customerMap = memberOne.getMap("customers");
        customerMap.put(1, "google");
        customerMap.put(2, "apple");
        customerMap.put(3, "yahoo");
        customerMap.put(4, "microsoft");
        System.out.println("Hazelcast Nodes in this cluster" + Hazelcast.getAllHazelcastInstances().size());
        memberOne.shutdown();
        System.out.println("Hazelcast Nodes in this cluster After shutdown" + Hazelcast.getAllHazelcastInstances().size());
        Map<Integer, String> customerRestored = memberTwo.getMap("customers");
        for (String val : customerRestored.values()) {
            System.out.println("-" + val);
        }
    }
}
Try this; it adds a few lines to your code and a new class.
public class ShutDown {

    static {
        // ONLY TEMPORARY
        System.setProperty("hazelcast.logging.type", "none");
    }

    public static void main(String[] args) {
        Config cfg = new Config();
        HazelcastInstance memberOne = Hazelcast.newHazelcastInstance(cfg);
        // ADDED TO MEMBER ONE
        memberOne.getCluster().addMembershipListener(new ShutDownMembershipListener());
        HazelcastInstance memberTwo = Hazelcast.newHazelcastInstance(cfg);
        // ADDED TO MEMBER TWO
        memberTwo.getCluster().addMembershipListener(new ShutDownMembershipListener());
        Map<Integer, String> customerMap = memberOne.getMap("customers");
        customerMap.put(1, "google");
        customerMap.put(2, "apple");
        customerMap.put(3, "yahoo");
        customerMap.put(4, "microsoft");
        System.out.println("Hazelcast Nodes in this cluster" + Hazelcast.getAllHazelcastInstances().size());
        memberOne.shutdown();
        System.out.println("Hazelcast Nodes in this cluster After shutdown" + Hazelcast.getAllHazelcastInstances().size());
        Map<Integer, String> customerRestored = memberTwo.getMap("customers");
        for (String val : customerRestored.values()) {
            System.out.println("-" + val);
        }
    }

    static class ShutDownMembershipListener implements MembershipListener {

        @Override
        public void memberAdded(MembershipEvent membershipEvent) {
            System.out.println(this + membershipEvent.toString());
        }

        @Override
        public void memberAttributeChanged(MemberAttributeEvent arg0) {
        }

        @Override
        public void memberRemoved(MembershipEvent membershipEvent) {
            System.out.println(this + membershipEvent.toString());
        }
    }
}
The line System.setProperty("hazelcast.logging.type", "none") is just for testing to make it simpler to see what is happening.
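As a variation on the same idea (my sketch, not part of the original answer), the listener can also be registered through the Config so it is in place before the member starts:
// Sketch: register the membership listener declaratively via ListenerConfig.
Config cfg = new Config();
cfg.addListenerConfig(new ListenerConfig(new ShutDownMembershipListener()));
HazelcastInstance member = Hazelcast.newHazelcastInstance(cfg);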

Hazelcast write-behind with map.replace

It seems that the MapStore (write-behind mode) does not work properly when replacing some map items.
I expected that map items which have been replaced are not processed anymore.
I am using Hazelcast 3.2.5.
Did I miss something here?
Please see the server test class, client test class, and the output below, which demonstrate the problem.
Server Class:
public class HazelcastInstanceTest {

    public static void main(String[] args) {
        Config cfg = new Config();
        MapConfig mapConfig = new MapConfig();
        MapStoreConfig mapStoreConfig = new MapStoreConfig();
        mapStoreConfig.setEnabled(true);
        mapStoreConfig.setClassName("com.test.TestMapStore");
        mapStoreConfig.setWriteDelaySeconds(15);
        mapConfig.setMapStoreConfig(mapStoreConfig);
        mapConfig.setName("customers");
        cfg.addMapConfig(mapConfig);
        HazelcastInstance instance = Hazelcast.newHazelcastInstance(cfg);
    }
}
MapStore Impl Class
public class TestMapStore implements MapStore {

    @Override
    public Object load(Object arg0) {
        System.out.println("--> LOAD");
        return null;
    }

    @Override
    public Map loadAll(Collection arg0) {
        System.out.println("--> LOAD ALL");
        return null;
    }

    @Override
    public Set loadAllKeys() {
        System.out.println("--> LOAD ALL KEYS");
        return null;
    }

    @Override
    public void delete(Object arg0) {
        System.out.println("--> DELETE");
    }

    @Override
    public void deleteAll(Collection arg0) {
        System.out.println("--> DELETE ALL");
    }

    @Override
    public void store(Object arg0, Object arg1) {
        System.out.println("--> STORE " + arg1.toString());
    }

    @Override
    public void storeAll(Map arg0) {
        System.out.println("--> STORE ALL");
    }
}
Client Class
public class HazelcastClientTest {

    public static void main(String[] args) throws Exception {
        ClientConfig clientConfig = new ClientConfig();
        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
        IMap mapCustomers = client.getMap("customers");
        System.out.println("Map Size:" + mapCustomers.size());
        mapCustomers.put(1, "Item A");
        mapCustomers.replace(1, "Item B");
        mapCustomers.replace(1, "Item C");
        System.out.println("Map Size:" + mapCustomers.size());
    }
}
Client Output (which is ok):
Map Size:0
Map Size:1
Server Output (which is not OK, I suppose; I expected only Item C):
--> LOAD ALL KEYS
--> LOAD
--> STORE Item A
--> STORE ALL
--> STORE Item B
--> STORE Item C
Any help is appreciated.
Many thanks.

ServiceStack - Instance per http request - (Console application) - (Ninject)

I'm having problems configuring instance-per-HTTP-request in ServiceStack. I am running a console app and trying to use Ninject as my container.
I would like an instance to last for one full request, and I would like to know how I can configure this.
I would like an example showing how I could start the lifetime of the dependency when the request starts and how to dispose of that same instance when the request ends.
Could anyone give me an example of how to do this in Autofac and Ninject?
public class OutboundHost : AppHostHttpListenerBase
{
    public OutboundHost() : base("StarterTemplate HttpListener", typeof(SmsService).Assembly) { }

    protected override void ProcessRequest(System.Net.HttpListenerContext context)
    {
        Console.WriteLine("Begin Processing request");
        base.ProcessRequest(context);
    }

    public override void Start(string urlBase)
    {
        base.Start(urlBase);
    }

    public override void OnEndRequest()
    {
        Console.WriteLine("End processing request");
        base.OnEndRequest();
    }

    public override void Configure(Funq.Container container)
    {
        IKernel kernel = new StandardKernel();
        RegisterEntityFrameworkNinject(kernel);
        RegisterDependenciesNinject(kernel);
        LogManager.LogFactory = new ConsoleLogFactory();
        Console.WriteLine("Starting Container");
        container.Register<ICacheClient>(new MemoryCacheClient());
        container.RegisterValidators(typeof(SendSmsValidator).Assembly);
        Plugins.Add(new RazorFormat());
        Plugins.Add(new ValidationFeature());
        Plugins.Add(new RequestLogsFeature() { RequiredRoles = new string[] { } });
        Plugins.Add(new AuthFeature(() => new AuthUserSession(), new IAuthProvider[] { new CustomCredentialsAuthProvider() }));
        SetConfig(new EndpointHostConfig
        {
            CustomHttpHandlers = {
                { System.Net.HttpStatusCode.NotFound, new RazorHandler("/notfound") }
            }
        });
        InitializeLookupTables(kernel);
        container.Adapter = new NinjectIocAdapter(kernel, container);
    }

    private void RegisterDependenciesNinject(IKernel kernel)
    {
        kernel.Bind<ILog>().ToMethod(c => LogManager.LogFactory.GetLogger(typeof(ConsoleLogger))).InSingletonScope();
        kernel.Bind<ISendSmsIntenda>().To<SendSmsIntenda>();
    }

    private void RegisterEntityFrameworkNinject(IKernel kernel)
    {
        kernel.Bind<IEntityFrameworkUnitOfWork>().To<EntityFrameworkUnitOfWork>();
        kernel.Bind<IUnitOfWork>().To<EntityFrameworkUnitOfWork>();
        kernel.Bind<IRepository>().To<Repository>();
        kernel.Bind<ILookupTableFactory>().To<EntityFrameworkLookupTableFactory>();
        kernel.Bind<IDataQuery>().To<DatabaseQuery>();
        kernel.Bind<IContextFactory>().To<DefaultContextFactory<OutboundContext>>().InSingletonScope()
            .WithConstructorArgument("connectionStringName", GetConnectionString());
    }

    private string GetConnectionString()
    {
        return ConfigurationManager.ConnectionStrings["Outbound"].ConnectionString;
    }

    private void InitializeLookupTables(IKernel kernel)
    {
        using (var kernelContext = kernel.BeginBlock())
        {
            var contextFactory = kernelContext.Get<IContextFactory>();
            var frameworkContext = kernelContext.Get<IContextFactory>().GetContext() as IFrameworkContext;
            var unitOfWork = kernelContext.Get<IUnitOfWork>();
            contextFactory.GetContext().RegisterLookupTable<SmsStatusType, SmsStatusEnum>();
            frameworkContext.SaveLookupTableChanges();
            unitOfWork.Commit();
        }
    }
}
public class NinjectIocAdapter : IContainerAdapter
{
    private readonly IKernel kernel;
    private readonly Container container;

    public NinjectIocAdapter(IKernel kernel, Funq.Container container)
    {
        this.kernel = kernel;
        this.container = container;
    }

    public T Resolve<T>()
    {
        return this.kernel.Get<T>();
    }

    public T TryResolve<T>()
    {
        return this.kernel.TryGet<T>();
    }
}
