I would like to set up the Java driver to collect JMX metrics that I can view with jConsole or jmxterm. How do I go about exposing those MBean metrics in the Cassandra Java driver? In this case, I'm using the 4.14 Java driver.
Here's a good link that helped me with the 4.14 driver:
https://docs.datastax.com/en/developer/java-driver/4.14/manual/core/metrics/
Ultimately, the JMX metrics show up in jConsole under the com.datastax.oss.driver domain.
And here's my application code:
pom.xml:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.example.cassandra</groupId>
  <artifactId>testing-connection</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <name>testing-connection</name>
  <properties>
    <java.version>1.8</java.version>
    <driver.version>4.14.1</driver.version>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
  </properties>
  <dependencies>
    <dependency>
      <groupId>com.datastax.oss</groupId>
      <artifactId>java-driver-core</artifactId>
      <version>${driver.version}</version>
    </dependency>
    <dependency>
      <groupId>io.dropwizard.metrics</groupId>
      <artifactId>metrics-jmx</artifactId>
      <version>4.1.2</version>
    </dependency>
  </dependencies>
</project>
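One note on the dependencies: java-driver-core itself only needs metrics-core, but the JmxReporter used below lives in the separate metrics-jmx module (package com.codahale.metrics.jmx), which is why that artifact is declared explicitly.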
application.conf:
datastax-java-driver {
  basic {
    contact-points = [ "10.101.36.152:9042" ]
    load-balancing-policy {
      local-datacenter = "SearchGraphAnalytics"
    }
  }
  advanced.metrics {
    session.enabled = [ bytes-sent, bytes-received, connected-nodes, cql-requests, cql-client-timeouts, cql-prepared-cache-size, throttling.delay, throttling.queue-size, throttling.errors, continuous-cql-requests, graph-requests, graph-client-timeouts ]
    node.enabled = [ pool.open-connections, pool.available-streams, pool.in-flight, pool.orphaned-streams, bytes-sent, bytes-received, cql-messages, errors.request.unsent, errors.request.aborted, errors.request.write-timeouts, errors.request.read-timeouts, errors.request.unavailables, errors.request.others, retries.total, retries.aborted, retries.read-timeout, retries.write-timeout, retries.unavailable, retries.other, ignores.total, ignores.aborted, ignores.read-timeout, ignores.write-timeout, ignores.unavailable, ignores.other, speculative-executions, errors.connection.init, errors.connection.auth, graph-messages ]
  }
}
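As an aside, it looks like the same metrics can be enabled programmatically instead of through application.conf. Here's a minimal sketch using the 4.x programmatic config builder (my own assumption of how it composes; I haven't verified how it merges with an application.conf on the classpath, and only a couple of metric names are shown):

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import java.util.Arrays;

public class ProgrammaticMetricsExample {
    public static void main(String[] args) {
        // Enable a subset of session/node metrics in code; the option values
        // mirror the application.conf keys above.
        DriverConfigLoader loader = DriverConfigLoader.programmaticBuilder()
            .withStringList(DefaultDriverOption.METRICS_SESSION_ENABLED,
                Arrays.asList("cql-requests", "bytes-sent"))
            .withStringList(DefaultDriverOption.METRICS_NODE_ENABLED,
                Arrays.asList("pool.open-connections", "pool.in-flight"))
            .build();

        try (CqlSession session = CqlSession.builder()
                .withConfigLoader(loader)
                .build()) {
            System.out.println("Connected as session: " + session.getName());
        }
    }
}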
Main class:
package com.example.cassandra;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.CqlSessionBuilder;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.metadata.Metadata;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jmx.JmxReporter;
import java.util.ArrayList;

public class TestingConnections {

    private CqlSession session;
    static String keyspace = "keyspace1";
    static String table = "names";

    public void connect() {
        // Contact points and local datacenter come from application.conf
        CqlSessionBuilder builder = CqlSession.builder();
        session = builder.build();
        Metadata metadata = session.getMetadata();
        System.out.printf("Connected to cluster: %s%n", metadata.getClusterName());
    }

    public CqlSession getSession() {
        return this.session;
    }

    public void getData(String keyspace, String table) {
        ResultSet results = session.execute("select * from " + keyspace + "." + table);
        ArrayList<String> firstNames = new ArrayList<>();
        results.forEach(row -> firstNames.add(row.getString("first")));
        firstNames.forEach(System.out::println);
    }

    public void close() {
        session.close();
    }

    public void registerJMX() {
        // Pull the driver's Dropwizard registry and expose it over JMX
        MetricRegistry registry = session.getMetrics()
            .orElseThrow(() -> new IllegalStateException("Metrics are disabled"))
            .getRegistry();
        JmxReporter reporter = JmxReporter.forRegistry(registry)
            .inDomain("com.datastax.oss.driver")
            .build();
        reporter.start();
    }

    public static void main(String[] args) {
        System.out.println("Connecting to cluster");
        TestingConnections client = new TestingConnections();
        client.connect();
        client.registerJMX();
        client.getData(keyspace, table);
    }
}
That did work to generate the metrics in jConsole. I haven't tried metric packages other than Dropwizard (which is the default).
Also note, I am not calling client.close() because I want my session to stay open so that I can connect jConsole to my Java application.
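One more tip: a local jConsole can attach to the running process directly, with no extra flags. If jConsole or jmxterm has to connect from another machine, the standard JMX remote flags need to be passed when launching the JVM, e.g. -Dcom.sun.management.jmxremote.port=9999 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false (insecure settings, suitable for testing only).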
Related
org.graalvm.polyglot.PolyglotException: java.lang.AssertionError
    at com.oracle.truffle.polyglot.PolyglotList$Cache.lookup(PolyglotList.java:180)
    at com.oracle.truffle.polyglot.PolyglotList.<init>(PolyglotList.java:73)
    at com.oracle.truffle.polyglot.PolyglotList.create(PolyglotList.java:96)
    at com.oracle.truffle.polyglot.PolyglotHostAccess.toList(PolyglotHostAccess.java:107)
    at com.oracle.truffle.host.HostToTypeNode.asJavaObject(HostToTypeNode.java:384)
    at com.oracle.truffle.host.HostToTypeNode.convertImpl(HostToTypeNode.java:194)
    at com.oracle.truffle.host.HostToTypeNode.doCached(HostToTypeNode.java:118)
The above error is thrown when the test is run using the master runner below, and the Java method fails to run.
package tests;

import com.intuit.karate.Results;
import com.intuit.karate.Runner;
import net.masterthought.cucumber.Configuration;
import net.masterthought.cucumber.ReportBuilder;
import org.apache.commons.io.FileUtils;
import org.junit.jupiter.api.Test;
import reporting.ExtentReport;

import java.io.File;
import java.util.ArrayList;
import java.util.Collection;

import static org.junit.jupiter.api.Assertions.assertEquals;

class MasterTestRunner {

    // this will run all *.feature files
    /*@Karate.Test
    Karate testAll() {
        return Karate.run().relativeTo(getClass());
    }*/

    // Karate parallel runner
    @Test
    public void testParallel() {
        final Results results = Runner.path("classpath:tests/features").hook(new ExtentReport()).tags("@test")
                .outputCucumberJson(true).outputJunitXml(true).parallel(1);
        assertEquals(0, results.getFailCount(), results.getErrorMessages());
    }

    // Boilerplate for Cucumber report generation
    public static void generateReport(final String karateOutputPath) {
        final Collection<File> jsonFiles = FileUtils.listFiles(new File(karateOutputPath), new String[]{"json"}, true);
        final ArrayList<String> jsonPaths = new ArrayList<>(jsonFiles.size());
        jsonFiles.forEach(file -> jsonPaths.add(file.getAbsolutePath()));
        final Configuration config = new Configuration(new File("target"), "Karate test");
        System.out.println("config report " + config.getBuildNumber());
        final ReportBuilder reportBuilder = new ReportBuilder(jsonPaths, config);
        reportBuilder.generateReports();
    }
}
When it is run individually from the feature file, it succeeds.
The Karate test is like below.
Feature: karate testing

  @test
  Scenario: Karate test case
    * def acts = [ { "label": "BUSINESS", "name": "David", "id": "u374892", "type": "DIVISION" }, { "label": "Division", "name": "MILLER", "id": "u236478", "type": "BUSINESS" } ]
    * def data1 = [ { "UserName": "ABENANTE", "UserID": "u109238", "Role": "BUSINESS" }, { "UserName": "Louis", "UserID": "u784784", "Role": "DIVISION" } ]
    * def desk = utils.returnIds(data1, 2, acts, "DESK")
    * print desk
The Java method from utils has the following signature:
public static List<Map<String, Object>> returnIds(List<Map<String, Object>> listOfAllIds, int numberOfIds, List<Map<String, Object>> listOfIds, String role)
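For illustration, here is a hypothetical stub with the same signature (the real body and the enclosing class name are not shown in the question, so both are assumptions). It only exists to show the parameter types Karate has to coerce the JS arrays into, which is exactly the PolyglotList conversion visible in the stack trace:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class Utils {
    // Hypothetical stub only -- the real implementation is not shown above.
    // Karate must convert each JS array argument into a List<Map<String, Object>>.
    public static List<Map<String, Object>> returnIds(List<Map<String, Object>> listOfAllIds,
                                                      int numberOfIds,
                                                      List<Map<String, Object>> listOfIds,
                                                      String role) {
        List<Map<String, Object>> result = new ArrayList<>();
        for (Map<String, Object> entry : listOfAllIds) {
            if (result.size() >= numberOfIds) {
                break;
            }
            result.add(entry); // illustrative; the real selection logic is unknown
        }
        return result;
    }
}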
For Karate, the dependencies are:
<properties>
  <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
  <java.version>1.8</java.version>
  <maven.compiler.version>3.6.0</maven.compiler.version>
  <karate.version>1.1.0</karate.version>
</properties>
<dependencies>
  <!-- For Karate Begin -->
  <dependency>
    <groupId>com.intuit.karate</groupId>
    <artifactId>karate-junit5</artifactId>
    <version>${karate.version}</version>
    <scope>test</scope>
  </dependency>
  <dependency>
    <groupId>com.intuit.karate</groupId>
    <artifactId>karate-gatling</artifactId>
    <version>${karate.version}</version>
    <scope>test</scope>
  </dependency>
</dependencies>
I am not sure what might cause this issue.
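(For context, Karate 1.x runs feature-file JavaScript on GraalVM's JS engine rather than Nashorn, which I assume is why the failure surfaces as an org.graalvm.polyglot.PolyglotException during the JS-to-Java argument conversion.)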
I am migrating off of mongobee to Mongock so we can use Atlas. I've followed the commits for the suggested changes that have been merged into master, and have modified the CloudDatabaseConfiguration, DatabaseConfiguration, and InitialSetupMigration classes. I've also updated the pom to import the Mongock 4.1.17 dependencies.
Running the app, there seem to be no issues. I've tested the change log and everything operates as it should. When I run my tests, however, I am getting an error stating it cannot find the class org/springframework/data/mongodb/MongoDatabaseFactory.
Caused by: org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'mongockInitializingBeanRunner' defined in class path resource [com/ioi/helpdesk/gateway/config/DatabaseConfiguration.class]: Invocation of init method failed; nested exception is java.lang.NoClassDefFoundError: org/springframework/data/mongodb/MongoDatabaseFactory
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1796)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:595)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:517)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:323)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:226)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:321)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
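For reference, MongoDatabaseFactory only exists as of Spring Data MongoDB 3.0 (it replaced the older MongoDbFactory), so my suspicion is a version conflict on the test classpath; running `mvn dependency:tree -Dincludes=org.springframework.data` should show which spring-data-mongodb version each scope resolves. These are the Mongock dependencies: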
<dependency>
  <groupId>com.github.cloudyrock.mongock</groupId>
  <artifactId>mongock-spring-v5</artifactId>
  <version>4.1.17</version>
</dependency>
<dependency>
  <groupId>com.github.cloudyrock.mongock</groupId>
  <artifactId>mongodb-springdata-v3-driver</artifactId>
  <version>4.1.17</version>
</dependency>
I have not changed the starter data dependency
<dependency>
  <groupId>org.springframework.boot</groupId>
  <artifactId>spring-boot-starter-data-mongodb</artifactId>
</dependency>
@Configuration
@EnableMongoRepositories("com.ioi.helpdesk.gateway.repository")
@Profile("!" + JHipsterConstants.SPRING_PROFILE_CLOUD)
@Import(value = MongoAutoConfiguration.class)
@EnableMongoAuditing(auditorAwareRef = "springSecurityAuditorAware")
public class DatabaseConfiguration {

    private final Logger log = LoggerFactory.getLogger(DatabaseConfiguration.class);

    @Bean
    public ValidatingMongoEventListener validatingMongoEventListener() {
        return new ValidatingMongoEventListener(validator());
    }

    @Bean
    public LocalValidatorFactoryBean validator() {
        return new LocalValidatorFactoryBean();
    }

    @Bean
    public MongoCustomConversions customConversions() {
        List<Converter<?, ?>> converters = new ArrayList<>();
        converters.add(DateToZonedDateTimeConverter.INSTANCE);
        converters.add(ZonedDateTimeToDateConverter.INSTANCE);
        return new MongoCustomConversions(converters);
    }

    @Bean
    public MongockSpring5.MongockInitializingBeanRunner mongockInitializingBeanRunner(ApplicationContext springContext,
            MongoTemplate mongoTemplate,
            @Value("${mongock.lockAcquiredForMinutes:5}") long lockAcquiredForMinutes,
            @Value("${mongock.maxWaitingForLockMinutes:3}") long maxWaitingForLockMinutes,
            @Value("${mongock.maxTries:3}") int maxTries) {
        try {
            log.info("INITIALIZING MONGOCK!");
            SpringDataMongo3Driver driver = SpringDataMongo3Driver.withLockSetting(mongoTemplate, lockAcquiredForMinutes, maxWaitingForLockMinutes, maxTries);
            MongockSpring5.MongockInitializingBeanRunner runner = MongockSpring5.builder()
                .setDriver(driver)
                .addChangeLogsScanPackage("com.ioi.helpdesk.gateway.config.dbmigrations")
                .setSpringContext(springContext)
                .buildInitializingBeanRunner();
            log.info("MONGOCK INITIALIZED!");
            return runner;
        } catch (Exception e) {
            log.info("Error during Mongock initialization - " + ExceptionUtils.getStackTrace(e));
        }
        return null;
    }
}
Am I missing a test dependency, or have I incorrectly included one?
I created the following test class using the DataStax Cassandra driver.
package samples;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.Session;

public class SampleB {

    private static String server_ip = "127.0.0.1";
    private static String keyspace = "hr";
    private static Cluster cluster = null;
    private static Session session = null;

    public static void main(String[] args) {
        if (cluster != null) return;
        cluster = Cluster.builder().addContactPoints(server_ip).withPort(9042).build();
        final Metadata metadata = cluster.getMetadata();
        String msg = String.format("Connected to cluster: %s", metadata.getClusterName());
        System.out.println(msg);
        System.out.println("List of hosts");
        for (final Host host : metadata.getAllHosts()) {
            msg = String.format("Datacenter: %s; Host: %s; Rack: %s",
                    host.getDatacenter(),
                    host.getAddress(),
                    host.getRack());
            System.out.println(msg);
        }
        session = cluster.connect(keyspace);
    }
}
When I run it, it fails with an exception whose trace ends in:
    ... (Cluster.java:407)
    at samples.SampleB.main(SampleB.java:28)
After googling, I tried all the proposed solutions without success. Could you please help me solve this issue? Thanks a lot.
You should verify these dependencies:
<!-- Apache Cassandra Datastax's CQL driver. -->
<dependency>
  <groupId>com.datastax.cassandra</groupId>
  <artifactId>cassandra-driver-core</artifactId>
  <version>3.1.0</version>
</dependency>
<dependency>
  <groupId>com.datastax.cassandra</groupId>
  <artifactId>cassandra-driver-mapping</artifactId>
  <version>3.1.0</version>
</dependency>
HTH
I have two clusters running Kafka and Spark separately. I want to create a Kafka topic from the Spark cluster. I have noticed that to create a topic we need to invoke kafka-topics.sh, which won't be available on the Spark cluster. The command should be invoked through a shell, e.g.:
/kafka-topics.sh --zookeeper :2181 --create --topic test_topic
This script should be called from the Spark cluster, and it should get executed on the Kafka cluster.
Can anyone help me?
You can use the Java API with the Maven dependencies below (Kafka and ZooKeeper) to create a Kafka topic. You can invoke this code from the application where you submit the Spark job.
<dependency>
  <groupId>com.101tec</groupId>
  <artifactId>zkclient</artifactId>
  <version>0.3</version>
</dependency>
<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka_2.10</artifactId>
  <version>0.8.2.1</version>
</dependency>
import java.util.Properties;

import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.exception.ZkInterruptedException;

import kafka.admin.AdminUtils;
import kafka.utils.ZKStringSerializer$;

public final class KafkaUtils {

    public static void main(String[] args) throws Exception {
        // 1 partition and replication factor 1 (the factor must be >= 1)
        KafkaUtils.createTopic("x.x.x.x:2181,y.y.y.y:2181", "topicName", 1, 1, new Properties());
    }

    public static void createTopic(String zkHosts, String topicName, int numberOfPartition, int replicationFactor, Properties properties) {
        ZkClient zkClient = null;
        try {
            zkClient = getZkClient(zkHosts);
            AdminUtils.createTopic(zkClient, topicName, numberOfPartition, replicationFactor, properties);
        } catch (Exception exception) {
            exception.printStackTrace();
        } finally {
            if (zkClient != null) {
                try {
                    zkClient.close();
                } catch (ZkInterruptedException ex) {
                    ex.printStackTrace();
                }
            }
        }
    }

    private static ZkClient getZkClient(String zkHosts) {
        // ZooKeeper session and connection timeouts in milliseconds
        final int sessionTimeoutMs = 10000;
        final int connectionTimeoutMs = 10000;
        return new ZkClient(zkHosts, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
    }
}
Here x.x.x.x and y.y.y.y are the ZooKeeper hosts for the Kafka cluster. Hope this helps.
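As a side note, the ZooKeeper-based AdminUtils approach matches the old kafka_2.10 0.8.2.1 dependency above. On newer Kafka versions (0.11+), the usual route is the AdminClient API from the kafka-clients artifact. A rough sketch, assuming that dependency is on the classpath and with placeholder broker addresses:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public final class KafkaTopicCreator {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Kafka broker bootstrap address (not ZooKeeper) -- placeholder host/port
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "x.x.x.x:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // topic name, number of partitions, replication factor
            NewTopic topic = new NewTopic("topicName", 1, (short) 1);
            admin.createTopics(Collections.singleton(topic)).all().get();
        }
    }
}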
So I am running into issues connecting to a single-node Cassandra cluster using Spring Data Cassandra. I am using the Docker image found at: https://hub.docker.com/_/cassandra/
using a docker-compose file with the following environment variables set:
cassandra_n1:
  image: cassandra:latest
  ports:
    - "9042:9042"
    - "9160:9160"
  hostname: cassandra_n1
  environment:
    CASSANDRA_CLUSTER_NAME: "mycluster"
    CASSANDRA_ENDPOINT_SNITCH: "PropertyFileSnitch"
    CASSANDRA_DC: "DC1"
    CASSANDRA_RACK: "R1"
Then after this starts, I try to connect to it using my Spring Boot application, which is as simple as:
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class MvcApplication {
    public static void main(String[] args) {
        SpringApplication.run(MvcApplication.class);
    }
}
which picks up the following configuration class:
@Configuration
@PropertySource(value = { "classpath:cassandra.properties" })
@EnableCassandraRepositories(basePackages = { "myproject.repository" })
public class CassandraConfig {

    private static final Logger LOG = LoggerFactory.getLogger(CassandraConfig.class);

    @Autowired
    private Environment env;

    @Bean
    public CassandraClusterFactoryBean cluster() {
        CassandraClusterFactoryBean cluster = new CassandraClusterFactoryBean();
        cluster.setContactPoints(env.getProperty("cassandra.contactpoints"));
        cluster.setPort(Integer.parseInt(env.getProperty("cassandra.port")));
        return cluster;
    }

    @Bean
    public CassandraMappingContext mappingContext() {
        return new BasicCassandraMappingContext();
    }

    @Bean
    public CassandraConverter converter() {
        return new MappingCassandraConverter(mappingContext());
    }

    @Bean
    public CassandraSessionFactoryBean session() throws Exception {
        CassandraSessionFactoryBean session = new CassandraSessionFactoryBean();
        session.setCluster(cluster().getObject());
        session.setKeyspaceName(env.getProperty("cassandra.keyspace"));
        session.setConverter(converter());
        session.setSchemaAction(SchemaAction.NONE);
        return session;
    }

    @Bean
    public CassandraOperations cassandraTemplate() throws Exception {
        return new CassandraTemplate(session().getObject());
    }
}
This configuration reads the property file cassandra.properties, which is:
cassandra.contactpoints=192.168.99.100
cassandra.port=9042
cassandra.keyspace=mykeyspace
I am using docker-machine as the Docker daemon, which has an address of 192.168.99.100.
In my pom I'm using these dependencies:
<dependency>
  <groupId>org.springframework.data</groupId>
  <artifactId>spring-data-cassandra</artifactId>
  <version>1.0.0.RELEASE</version>
  <exclusions>
    <exclusion>
      <groupId>org.springframework</groupId>
      <artifactId>spring-expression</artifactId>
    </exclusion>
  </exclusions>
</dependency>
<dependency>
  <groupId>org.springframework.boot</groupId>
  <artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
  <groupId>org.springframework</groupId>
  <artifactId>spring-expression</artifactId>
  <version>4.1.7.RELEASE</version>
</dependency>
After I build and run my application, it fails to connect to Cassandra with this message:
Caused by: com.datastax.driver.core.exceptions.NoHostAvailableException:
All host(s) tried for query failed (tried: /192.168.99.100:9042 (com.datastax.driver.core.ConnectionException:
[/192.168.99.100:9042] Unexpected error during transport initialization (com.datastax.driver.core.TransportException:
[/192.168.99.100:9042] Unexpected exception triggered (java.lang.IndexOutOfBoundsException:
Not enough readable bytes - Need 4, maximum is 0))))
I have tried setting the listen_address, broadcast_address, and rpc_address to the Docker daemon IP but have not had success.
Any help would be appreciated.