firefox.NotConnectedException Unable to connect on port 7055 after 45000 - linux

When I try to run a Selenium test using Xvfb, I get the error below:
org.openqa.selenium.firefox.NotConnectedException: Unable to connect to host localhost.localdomain on port 7055 after 45000 ms. Firefox console output:
Error: cannot open display: 1
at org.openqa.selenium.firefox.internal.NewProfileExtensionConnection.start(NewProfileExtensionConnection.java:113)
at org.openqa.selenium.firefox.FirefoxDriver.startClient(FirefoxDriver.java:271)
at org.openqa.selenium.remote.RemoteWebDriver.<init>(RemoteWebDriver.java:119)
at org.openqa.selenium.firefox.FirefoxDriver.<init>(FirefoxDriver.java:216)
at org.openqa.selenium.firefox.FirefoxDriver.<init>(FirefoxDriver.java:211)
at org.openqa.selenium.firefox.FirefoxDriver.<init>(FirefoxDriver.java:128)
at com.gtech.automation.uk.webdriver.WebDriverFactory.getWebDriver(WebDriverFactory.java:41)
at com.gtech.automation.uk.dashboard.DashboardImpl.getWebDriver(DashboardImpl.java:53)
at com.gtech.automation.uk.dashboard.DashboardImpl.goHome(DashboardImpl.java:91)
at com.gtech.automation.uk.dashboard.steps.footer.ConnectionInfoSteps.isConnectionClosed(ConnectionInfoSteps.java:68)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:95)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:56)
at java.lang.reflect.Method.invoke(Method.java:620)
at cucumber.runtime.Utils$1.call(Utils.java:34)
at cucumber.runtime.Timeout.timeout(Timeout.java:13)
at cucumber.runtime.Utils.invoke(Utils.java:30)
at cucumber.runtime.java.JavaStepDefinition.execute(JavaStepDefinition.java:35)
at cucumber.runtime.StepDefinitionMatch.runStep(StepDefinitionMatch.java:37)
at cucumber.runtime.Runtime.runStep(Runtime.java:298)
at cucumber.runtime.model.StepContainer.runStep(StepContainer.java:44)
at cucumber.runtime.model.StepContainer.runSteps(StepContainer.java:39)
at cucumber.runtime.model.CucumberScenario.run(CucumberScenario.java:48)
at cucumber.runtime.junit.ExecutionUnitRunner.run(ExecutionUnitRunner.java:91)
at cucumber.runtime.junit.FeatureRunner.runChild(FeatureRunner.java:63)
at cucumber.runtime.junit.FeatureRunner.runChild(FeatureRunner.java:18)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at cucumber.runtime.junit.FeatureRunner.run(FeatureRunner.java:70)
at cucumber.api.junit.Cucumber.runChild(Cucumber.java:93)
at cucumber.api.junit.Cucumber.runChild(Cucumber.java:37)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:238)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:63)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:236)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:53)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:229)
at org.junit.runners.ParentRunner.run(ParentRunner.java:309)
at cucumber.api.junit.Cucumber.run(Cucumber.java:98)
at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:283)
at org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:173)
at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:153)
at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:128)
at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:203)
at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:155)
at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:103)
These are the configurations I have used:
Selenium version: 2.53.0
Firefox version: 46
I have checked port 7055; it is not active:
netstat -ntlp | grep 7055
However, Xvfb is running on the Linux server:
netstat -ntlp | grep Xvfb
tcp 0 0 0.0.0.0:6001 0.0.0.0:* LISTEN 31384/Xvfb
I tried running the Xvfb server with the port number, but it is not working:
Xvfb :1 -screen 0 1024x768x16 -nolisten inet6 -port 7055 -from localhost.localdomain
Please help with this.
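Note: port 7055 is not an X port at all; it is the port the legacy FirefoxDriver uses to talk to its in-browser extension, and Firefox only opens it after it has started (the -port and -from flags above are XDMCP options, unrelated to the driver port). The "Error: cannot open display: 1" line means Firefox could not reach the X display, which usually means DISPLAY is not set for the process that launches Firefox. A minimal sketch of pointing the driver at the Xvfb display from Java, assuming the Selenium 2.x FirefoxBinary API:
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.firefox.FirefoxBinary;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.openqa.selenium.firefox.FirefoxProfile;

public class XvfbFirefoxFactory {
    public static WebDriver createOnXvfb() {
        FirefoxBinary binary = new FirefoxBinary();
        // Display :1 corresponds to TCP port 6001 (6000 + display number),
        // which matches the netstat output above.
        binary.setEnvironmentProperty("DISPLAY", ":1");
        return new FirefoxDriver(binary, new FirefoxProfile());
    }
}
Alternatively, exporting DISPLAY=:1 in the shell that starts the test JVM achieves the same thing.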
Code
public static WebDriver getWebDriver(WebDriverImplementation implementation) {
    switch (implementation) {
        case FIREFOX:
            return new FirefoxDriver(getFirefoxCapabilities());
        case FIREBUG: {
            FirefoxProfile firefoxProfile = new FirefoxProfile();
            String seleniumPath = System.getProperty("selenuim.webdriver.path");
            try {
                firefoxProfile.addExtension(new File(seleniumPath + "/" + FIREBUG_FILENAME));
            } catch (NullPointerException e) {
                logger.log(Level.SEVERE, null, e);
            } catch (Exception e) {
                System.out.println("Unexpected exception");
                logger.log(Level.SEVERE, null, e);
            }
            firefoxProfile.setPreference("extensions.firebug.currentVersion", FIREBUG_VERSION);
            firefoxProfile.setPreference(seleniumPath, seleniumPath);
            DesiredCapabilities capabilities = getFirefoxCapabilities();
            capabilities.setCapability(FirefoxDriver.PROFILE, firefoxProfile);
            return new FirefoxDriver(capabilities);
        }
        case SHARED_FIREFOX:
            return new SharedFireFoxDriver();
        case MARIONETTE:
            return new MarionetteDriver(getFirefoxCapabilities());
        case INTERNET_EXPLORER:
            return new InternetExplorerDriver();
        default:
            String message = "Unable to find a web driver implementation for: " + implementation.name();
            logger.log(Level.SEVERE, message);
            throw new UnsupportedOperationException(message);
    }
}

Related

How to setup a distributed eventbus in a vertx Hazelcast Cluster?

Here is the sender verticle. I have enabled multicast and set the public host to my machine's IP address:
VertxOptions options = new VertxOptions()
        .setClusterManager(ClusterManagerConfig.getClusterManager());

EventBusOptions eventBusOptions = new EventBusOptions()
        .setClustered(true)
        .setClusterPublicHost("10.10.1.160");
options.setEventBusOptions(eventBusOptions);

Vertx.clusteredVertx(options, res -> {
    if (res.succeeded()) {
        Vertx vertx = res.result();
        vertx.deployVerticle(new requestHandler());
        vertx.deployVerticle(new requestSender());
        EventBus eventBus = vertx.eventBus();
        eventBus.send("some.address", "hello", reply -> {
            System.out.println(reply.toString());
        });
    } else {
        LOGGER.info("Failed: " + res.cause());
    }
});
}
Here's the receiver verticle:
VertxOptions options = new VertxOptions().setClusterManager(mgr);
options.setEventBusOptions(new EventBusOptions()
        .setClustered(true)
        .setClusterPublicHost("10.10.1.174"));

Vertx.clusteredVertx(options, res -> {
    if (res.succeeded()) {
        Vertx vertx1 = res.result();
        System.out.println("Success");
        EventBus eb = vertx1.eventBus();
        System.out.println("ready");
        eb.consumer("some.address", message -> {
            message.reply("hello hello");
        });
    } else {
        System.out.println("Failed");
    }
});
I get this result when I run both main verticles, so the verticles are detected by Hazelcast and a connection is established:
INFO: [10.10.1.160]:33001 [dev] [3.10.5] Established socket connection between /10.10.1.160:33001 and /10.10.1.174:35725
Jan 11, 2021 11:45:10 AM com.hazelcast.internal.cluster.ClusterService
INFO: [10.10.1.160]:33001 [dev] [3.10.5]
Members {size:2, ver:2} [
Member [10.10.1.160]:33001 - 51b8c249-6b3c-4ca8-a238-c651845629d8 this
Member [10.10.1.174]:33001 - 1cba1680-025e-469f-bad6-884111313672
]
Jan 11, 2021 11:45:10 AM com.hazelcast.internal.partition.impl.MigrationManager
INFO: [10.10.1.160]:33001 [dev] [3.10.5] Re-partitioning cluster data... Migration queue size: 271
Jan 11, 2021 11:45:11 AM com.hazelcast.nio.tcp.TcpIpAcceptor
But when the event bus tries to send a message to the given address, I encounter the error below. Is this a problem with the event-bus configuration?
Jan 11, 2021 11:59:57 AM io.vertx.core.eventbus.impl.clustered.ConnectionHolder
WARNING: Connecting to server 10.10.1.174:39561 failed
io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: /10.10.1.174:39561
at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:716)
at io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:327)
at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:340)
at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:665)
at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:612)
at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:529)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:491)
at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:905)
at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.net.ConnectException: Connection refused
... 11 more
In Vert.x 3, the cluster host and cluster public host default to localhost.
If you only change the cluster public host in VertxOptions, Vert.x will bind EventBus transport servers to localhost while telling other nodes to connect to the public host.
This kind of configuration is needed when running Vert.x on some cloud providers, but in most cases you only need to set the cluster host (and then the public host will default to its value):
EventBusOptions eventBusOptions = new EventBusOptions()
        .setClustered(true)
        .setHost("10.10.1.160");
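Applied to the sender above, only the host option changes; the receiver would set its own address (10.10.1.174) the same way. A sketch, reusing the question's ClusterManagerConfig helper:
VertxOptions options = new VertxOptions()
        .setClusterManager(ClusterManagerConfig.getClusterManager());
options.setEventBusOptions(new EventBusOptions()
        .setClustered(true)
        .setHost("10.10.1.160")); // bind the event-bus server on this interface;
                                  // the public host now defaults to the same value

Vertx.clusteredVertx(options, res -> {
    // deploy verticles and send messages exactly as in the question
});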

How to capture data change in yugabyte db?

terminal 1:
postgres=# \c yugastore
You are now connected to database "yugastore" as user "postgres".
yugastore=# select count(*) from yugastore.users;
count
-------
2500
(1 row)
yugastore=# delete from yugastore.users;
DELETE 2500
(After starting insertion script at terminal 2)
yugastore=# select count(*) from yugastore.users;
ERROR: Query error: Restart read required at: { read: { physical: 1580057095845877 } local_limit: { physical: 1580057095880226 } global_limit: <min> in_txn_limit: <max> serial_no: 0 }
yugastore=# select count(*) from yugastore.users;
ERROR: Query error: Restart read required at: { read: { physical: 1580057098605539 } local_limit: { physical: 1580057098715271 } global_limit: <min> in_txn_limit: <max> serial_no: 0 }
terminal 2:
yugastore.users table is created and being populated.
time: 11:44:31.796 cumulative records: 100
time: 11:44:32.608 cumulative records: 200
time: 11:44:32.909 cumulative records: 300
time: 11:44:33.213 cumulative records: 400
time: 11:44:33.661 cumulative records: 500
...
time: 11:46:24.710 cumulative records: 18900
time: 11:46:25.137 cumulative records: 19000
time: 11:46:25.606 cumulative records: 19100
terminal 3:
[root@srvr0 ~]# java -jar ./yb_cdc_connector.jar --table_name yugastore.users --master_addrs 127.0.0.1:7100 --log_only
[2020-01-26 11:45:57,844] INFO Starting CDC Kafka Connector... (org.yb.cdc.Main:28)
2020-01-26 11:45:58,201 [INFO|org.yb.cdc.KafkaConnector|KafkaConnector] Creating new YB client...
[2020-01-26 11:46:02,853] INFO Discovered tablet YB Master for table YB Master with partition ["", "") (org.yb.client.AsyncYBClient:1593)
[2020-01-26 11:46:03,724] ERROR [Peer fakeUUID -> 127.0.0.1:9100] Tablet server sent error Invalid argument (yb/rpc/yb_rpc.cc:411): Call on service yb.cdc.CDCService received from Connection (0x0000000005b8e2d0) server 127.0.0.1:46926 => 127.0.0.1:9100 with an invalid method name: CreateCDCStream (org.yb.client.TabletClient:380)
2020-01-26 11:46:03,725 [ERROR|org.yb.cdc.Main|Main] Application ran into error:
org.yb.client.NonRecoverableException: [Peer fakeUUID -> 127.0.0.1:9100] Tablet server sent error Invalid argument (yb/rpc/yb_rpc.cc:411): Call on service yb.cdc.CDCService received from Connection (0x0000000005b8e2d0) server 127.0.0.1:46926 => 127.0.0.1:9100 with an invalid method name: CreateCDCStream
at org.yb.client.TabletClient.decode(TabletClient.java:379)
at org.yb.client.TabletClient.decode(TabletClient.java:98)
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:500)
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.yb.client.TabletClient.handleUpstream(TabletClient.java:608)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.handler.timeout.ReadTimeoutHandler.messageReceived(ReadTimeoutHandler.java:184)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
at org.yb.client.AsyncYBClient$TabletClientPipeline.sendUpstream(AsyncYBClient.java:2002)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255)
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:318)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Update 1:
After installing YugabyteDB 2.0.10.0, the "Restart read required" error is resolved, but no change logs are printed.
Error logs:
[root@srvr0 ~]# java -jar ./yb-cdc-connector.jar --table_name yugastore.users --master_addrs 127.0.0.1:7100 --stream_id 1 --log_only
[2020-01-28 08:27:31,101] INFO Starting CDC Kafka Connector... (org.yb.cdc.Main:28)
2020-01-28 08:27:31,154 [INFO|org.yb.cdc.KafkaConnector|KafkaConnector] Creating new YB client...
[2020-01-28 08:27:32,288] INFO Discovered tablet YB Master for table YB Master with partition ["", "") (org.yb.client.AsyncYBClient:1593)
2020-01-28 08:27:32,597 [INFO|org.yb.cdc.KafkaConnector|KafkaConnector] Polling for new tablet ce5115a780224cd0ab8a8e9c1a46b961
2020-01-28 08:27:32,604 [INFO|org.yb.cdc.KafkaConnector|KafkaConnector] Polling for new tablet cca5b30bb7784ae2a8796097d6fd5b2f
2020-01-28 08:27:32,694 [ERROR|org.yb.cdc.Poller|Poller] Invalid Request
2020-01-28 08:27:32,695 [ERROR|org.yb.cdc.Poller|Poller] Invalid Request
[root@srvr0 ~]#
Please help me in resolving the issues.
The read-restart issue that you see with the select count(*) query has been fixed, and the fix is available from version 2.0.5.2: https://github.com/yugabyte/yugabyte-db/commit/3212616e351647436f808d4963d229e7881996c8.
Similarly, it seems like you are using an older, deprecated version of the CDC connector. You can get the connector using:
wget -O yb-cdc-connector.jar https://github.com/yugabyte/yb-kafka-connector/blob/master/yb-cdc/yb-cdc-connector.jar?raw=true
And then run:
java -jar ./yb-cdc-connector.jar --table_name yugastore.users --master_addrs 127.0.0.1:7100 --log_only

failed for get of /hbase/hbaseid, code = CONNECTIONLOSS, retries = 6

I am trying to connect a Spark application to HBase. Below is the configuration I am using:
val conf = HBaseConfiguration.create()
conf.set("hbase.master", "localhost:16010")
conf.setInt("timeout", 120000)
conf.set("hbase.zookeeper.quorum", "2181")
val connection = ConnectionFactory.createConnection(conf)
and below are the 'jps' details:
5808 ResourceManager
8150 HMaster
8280 HRegionServer
5131 NameNode
8076 HQuorumPeer
5582 SecondaryNameNode
2798 org.eclipse.equinox.launcher_1.4.0.v20161219-1356.jar
8623 Jps
5951 NodeManager
5279 DataNode
I have also tried with the HBase master port 16010.
I am getting the error below:
19/09/12 21:49:00 WARN ClientCnxn: Session 0x0 for server null, unexpected error, closing socket connection and attempting reconnect
java.net.SocketException: Invalid argument
at sun.nio.ch.Net.connect0(Native Method)
at sun.nio.ch.Net.connect(Net.java:454)
at sun.nio.ch.Net.connect(Net.java:446)
at sun.nio.ch.SocketChannelImpl.connect(SocketChannelImpl.java:648)
at org.apache.zookeeper.ClientCnxnSocketNIO.registerAndConnect(ClientCnxnSocketNIO.java:277)
at org.apache.zookeeper.ClientCnxnSocketNIO.connect(ClientCnxnSocketNIO.java:287)
at org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1024)
at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1060)
19/09/12 21:49:00 WARN ReadOnlyZKClient: 0x1e3ff233 to 2181:2181 failed for get of /hbase/hbaseid, code = CONNECTIONLOSS, retries = 4
19/09/12 21:49:01 INFO ClientCnxn: Opening socket connection to server 2181/0.0.8.133:2181. Will not attempt to authenticate using SASL (unknown error)
19/09/12 21:49:01 ERROR ClientCnxnSocketNIO: Unable to open socket to 2181/0.0.8.133:2181
It looks like there is a problem joining ZooKeeper.
First check that ZooKeeper is started on your localhost on port 2181:
netstat -tunelp | grep 2181 | grep -i LISTEN
tcp6 0 0 :::2181 :::* LISTEN
In your conf, the hbase.zookeeper.quorum property must be given the IP of your ZooKeeper host, not the port; the port belongs in hbase.zookeeper.property.clientPort.
My HBase connector is built with:
val conf = HBaseConfiguration.create()
conf.set("hbase.zookeeper.quorum", "10.80.188.65")
conf.set("hbase.master", "10.80.188.64:60000")
conf.set("hbase.zookeeper.property.clientPort", "2181")
conf.set("zookeeper.znode.parent", "/hbase-unsecure")
val connection = ConnectionFactory.createConnection(conf)
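Adapted to the question's single-node setup, the fix would look something like the sketch below (shown in Java, which the Scala snippet maps to directly; "localhost" is an assumption for a vanilla local install):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class HBaseConnect {
    public static Connection connect() throws java.io.IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost");         // ZooKeeper host(s), not a port
        conf.set("hbase.zookeeper.property.clientPort", "2181"); // the port goes here
        // hbase.master does not normally need to be set: the client
        // discovers the active master through ZooKeeper.
        return ConnectionFactory.createConnection(conf);
    }
}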

How port is reused by multiple node application process

When I run the same application, which uses an OS port, multiple times in a Java application (example), I get the exception below:
Exception in thread "main" java.net.BindException: Address already in use
at sun.nio.ch.Net.bind0(Native Method)
at sun.nio.ch.Net.bind(Net.java:433)
at sun.nio.ch.Net.bind(Net.java:425)
at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
node_cluster.js
const cluster = require('cluster'),
      cpus = require('os').cpus().length;

cluster.setupMaster({
  exec: '3_worker.js'
});

if (cluster.isMaster) {
  for (let i = 0; i < cpus; i++) {
    cluster.fork();
  }
  cluster.on('fork', worker => console.log(worker.id + " is forked"));
  cluster.on('listening', (worker, address) => console.log(worker.id + "is listening on " + JSON.stringify(address)));
  cluster.on('online', worker => console.log(`${worker.id} is online`));
  cluster.on('disconnect', worker => console.log(`${worker.id} is disconnected`));
  cluster.on('exit', (worker, code, signal) => console.log(`${worker.id} is dead due to ${code} and ${signal}`));
}
3_worker.js
const http = require('http');

http.createServer((req, res) => {
  console.log(req.url);
  res.writeHead(200);
  res.end('hello');
}).listen(8080);
The above programs produce this output:
3is listening on {"addressType":4,"address":null,"port":8080}
5is listening on {"addressType":4,"address":null,"port":8080}
4is listening on {"addressType":4,"address":null,"port":8080}
7is listening on {"addressType":4,"address":null,"port":8080}
8is listening on {"addressType":4,"address":null,"port":8080}
2is listening on {"addressType":4,"address":null,"port":8080}
1is listening on {"addressType":4,"address":null,"port":8080}
6is listening on {"addressType":4,"address":null,"port":8080}
The output of ps is:
$ ps
PID TTY TIME CMD
4477 ttys001 0:00.16 /bin/bash -l
5219 ttys001 0:00.12 node 3_clustering.js
5220 ttys001 0:00.14 /Users/rajkumar.natarajan/.nvm/versions/node/v8.15.0/b
5221 ttys001 0:00.14 /Users/rajkumar.natarajan/.nvm/versions/node/v8.15.0/b
5222 ttys001 0:00.13 /Users/rajkumar.natarajan/.nvm/versions/node/v8.15.0/b
5223 ttys001 0:00.14 /Users/rajkumar.natarajan/.nvm/versions/node/v8.15.0/b
5224 ttys001 0:00.14 /Users/rajkumar.natarajan/.nvm/versions/node/v8.15.0/b
5225 ttys001 0:00.14 /Users/rajkumar.natarajan/.nvm/versions/node/v8.15.0/b
5226 ttys001 0:00.14 /Users/rajkumar.natarajan/.nvm/versions/node/v8.15.0/b
5227 ttys001 0:00.14 /Users/rajkumar.natarajan/.nvm/versions/node/v8.15.0/b
5794 ttys002 0:00.05 /bin/bash --rcfile /Applications/IntelliJ IDEA.app/Con
6063 ttys003 0:00.03 /Applications/Utilities/iTerm.app/Contents/MacOS/iTerm
6065 ttys003 0:00.04 -bash
How are multiple Node.js processes able to listen on the same port?
Only the master actually has the port open. It accepts all incoming traffic on that port and hands the connections off to the worker processes for handling; the workers never bind the port themselves.
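For illustration, here is the same accept-and-dispatch pattern in plain Java; this is a sketch of the principle (one listener, many workers), not of Node's actual IPC mechanism:
import java.io.IOException;
import java.io.OutputStream;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class MasterWorker {
    public static void main(String[] args) throws IOException {
        ExecutorService workers = Executors.newFixedThreadPool(
                Runtime.getRuntime().availableProcessors());
        try (ServerSocket master = new ServerSocket(8080)) { // the only bind on 8080
            while (true) {
                Socket conn = master.accept();       // master accepts...
                workers.submit(() -> handle(conn));  // ...workers process
            }
        }
    }

    private static void handle(Socket conn) {
        try (Socket c = conn; OutputStream out = c.getOutputStream()) {
            out.write("HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello".getBytes());
        } catch (IOException ignored) {
        }
    }
}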

Node startup failed with error: internal.Node.run - Exception during node startup {} in Corda

I have just started learning Corda and am building a custom CorDapp. The nodes deploy properly, but when I try to run them they fail to start and close. The logs and the error are given below:
RPC admin connection address: localhost:10048
E 13:15:07+0000 [main] internal.Node.run - Exception during node startup {}
java.net.BindException: Address already in use: bind
at sun.nio.ch.Net.bind0(Native Method) ~[?:1.8.0_181]
at sun.nio.ch.Net.bind(Unknown Source) ~[?:1.8.0_181]
at sun.nio.ch.Net.bind(Unknown Source) ~[?:1.8.0_181]
at sun.nio.ch.ServerSocketChannelImpl.bind(Unknown Source) ~[?:1.8.0_181]
at io.netty.channel.socket.nio.NioServerSocketChannel.doBind(NioServerSocketChannel.java:128) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.channel.AbstractChannel$AbstractUnsafe.bind(AbstractChannel.java:554) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.channel.DefaultChannelPipeline$HeadContext.bind(DefaultChannelPipeline.java:1258) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.channel.AbstractChannelHandlerContext.invokeBind(AbstractChannelHandlerContext.java:501) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.channel.AbstractChannelHandlerContext.bind(AbstractChannelHandlerContext.java:486) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.channel.DefaultChannelPipeline.bind(DefaultChannelPipeline.java:980) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.channel.AbstractChannel.bind(AbstractChannel.java:250) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.bootstrap.AbstractBootstrap$2.run(AbstractBootstrap.java:365) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:163) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:403) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:442) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858) ~[netty-all-4.1.9.Final.jar:4.1.9.Final]
at java.lang.Thread.run(Unknown Source) ~[?:1.8.0_181]
My build.gradle file is given below:
task deployNodes(type: net.corda.plugins.Cordform, dependsOn: ['jar']) {
    directory "./build/nodes"
    node {
        name "O=Notary,L=London,C=GB"
        notary = [validating: false]
        p2pPort 10006
        cordapps = ["$corda_release_group:corda-finance:$corda_release_version"]
    }
    node {
        name "O=PartyA,L=London,C=GB"
        p2pPort 10007
        rpcSettings {
            address("localhost:10008")
            adminAddress("localhost:10048")
        }
        webPort 10009
        cordapps = ["$corda_release_group:corda-finance:$corda_release_version"]
        rpcUsers = [[user: "user1", "password": "test", "permissions": ["ALL"]]]
    }
    node {
        name "O=PartyB,L=New York,C=US"
        p2pPort 10010
        rpcSettings {
            address("localhost:10011")
            adminAddress("localhost:10051")
        }
        webPort 10012
        cordapps = ["$corda_release_group:corda-finance:$corda_release_version"]
        rpcUsers = [[user: "user1", "password": "test", "permissions": ["ALL"]]]
    }
    node {
        name "O=PartyC,L=Paris,C=FR"
        p2pPort 10013
        rpcSettings {
            address("localhost:10014")
            adminAddress("localhost:10054")
        }
        webPort 10015
        cordapps = ["$corda_release_group:corda-finance:$corda_release_version"]
        rpcUsers = [[user: "user1", "password": "test", "permissions": ["ALL"]]]
    }
}
task runExampleClientRPCJava(type: JavaExec) {
    classpath = sourceSets.main.runtimeClasspath
    main = 'com.example.client.ExampleClientRPC'
    args 'localhost:10008'
}
The java.net.BindException: Address already in use means one of the configured ports is already taken, usually by a node or webserver left over from a previous run.
To kill the process using a particular port on Linux:
sudo kill -9 $(sudo lsof -t -i:{yourPortNumber})
To kill the process using a particular port on Windows:
Step 1) netstat -ano | findstr :yourPortNumber
Step 2) taskkill /PID <PID from previous step> /F
