Cassandra 3.x Trigger: Getting "Clustering Key and Its Value"

I need to find a way to get not only the partition key but also the clustering key through the Partition object. I know how to get the actual partition key and its value from the object, but not the clustering keys.
Here is what I have tried so far: using "unfilteredIterator", but that only returns the regular columns (not the clustering key/value).
My C* table looks like the following:
CREATE TABLE user.foo (
    ac_id timeuuid,
    mapping_id timeuuid,
    country text,
    state text,
    PRIMARY KEY (ac_id, mapping_id)
) WITH CLUSTERING ORDER BY (mapping_id DESC) ...
My code so far:
public static String getKeyText(Partition update) {
List<Map<String, String>> listOfMaps = new ArrayList<Map<String, String>>();
CFMetaData cfm = update.metadata();
Map<String, String> map = new HashMap<String, String>();
try {
UnfilteredRowIterator it = update.unfilteredIterator();
while (it.hasNext()) {
Unfiltered un = it.next();
Clustering clt = (Clustering) un.clustering();
Iterator<Cell> cells = update.getRow(clt).cells().iterator();
Iterator<ColumnDefinition> columnss = update.getRow(clt).columns().iterator();
while(columnss.hasNext()){
ColumnDefinition columnDef = columnss.next();
Cell cell = cells.next();
}
}
} catch (Exception e) {
}
}
The goal is to get the ac_id and mapping_id column names and values.
Any help is appreciated

I have solved it with the following:
public static String getKeyText(Partition update) {
List<Map<String, String>> listOfMaps = new ArrayList<Map<String, String>>();
CFMetaData cfm = update.metadata();
Map<String, String> map = new HashMap<String, String>();
String localKey = getKey(update.partitionKey().getKey(), cfm);
// go over all columns and only add those that are clustering and partition keys
List<ColumnDefinition> partitionKeyColumns = cfm.partitionKeyColumns();
for (ColumnDefinition partitionColumn : partitionKeyColumns) {
map.put(partitionColumn.name.toString(), localKey);
}
//Now work on clustering keys ONLY
try {
List<ColumnDefinition> clusteringKeyColumns = cfm.clusteringColumns();
UnfilteredRowIterator it = update.unfilteredIterator();
while (it.hasNext()) {
Unfiltered un = it.next();
Clustering clt = (Clustering) un.clustering();
ByteBuffer[] clusteringKeyValues = clt.getRawValues();
int i = 0;
for (ColumnDefinition column : clusteringKeyColumns) {
map.put(column.name.toString(), cfm.getKeyValidator().getString(clusteringKeyValues[i]));
i++;
}
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
listOfMaps.add(map);
// Now process Clustering keys
return gson.toJson(listOfMaps);
}
However, I am not sure if this is an optimal way to solve this
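One thing that might be worth tightening: the code above decodes every clustering component with cfm.getKeyValidator(), which is the partition-key type; that only works here because ac_id and mapping_id are both timeuuid. Below is a minimal sketch that uses each clustering column's own type instead. It leans on Cassandra 3.x internal APIs, so class and method names may vary slightly between minor versions.
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.db.Clustering;
import org.apache.cassandra.db.partitions.Partition;
import org.apache.cassandra.db.rows.Unfiltered;
import org.apache.cassandra.db.rows.UnfilteredRowIterator;

public final class ClusteringKeyHelper {
    // Returns clustering column name -> human-readable value for the rows in this update.
    public static Map<String, String> clusteringKeyValues(Partition update) {
        CFMetaData cfm = update.metadata();
        List<ColumnDefinition> clusteringCols = cfm.clusteringColumns();
        Map<String, String> map = new HashMap<>();
        try (UnfilteredRowIterator it = update.unfilteredIterator()) {
            while (it.hasNext()) {
                Unfiltered un = it.next();
                if (un.kind() != Unfiltered.Kind.ROW)
                    continue; // skip range tombstone markers
                Clustering clt = (Clustering) un.clustering();
                for (int i = 0; i < clusteringCols.size(); i++) {
                    ColumnDefinition column = clusteringCols.get(i);
                    // column.type is the AbstractType of this clustering column (timeuuid here)
                    map.put(column.name.toString(), column.type.getString(clt.get(i)));
                }
            }
        }
        return map;
    }
}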

Related

How to retrieve data of specific column and set it to edit text in android studio

I am new to databases. I have two tables in the database. One is user:
"Create Table user(username text , contact_number text primary key , password text , favourite_fruit text)"
and the other is keywords:
"Create Table keywords( contact_number text primary key , alarm text , location text )"
I want to retrieve the data of the alarm column from keywords and set it to an EditText in my activity, but I am unable to retrieve the data of the alarm column.
So far I have tried this:
public String getAlarm(String al){
SQLiteDatabase db = this.getReadableDatabase();
Cursor cursor=db.query("keywords",new String[]{"alarm"},"contact_number=?",new String[]{al},null,null,null,null);
String a;
if (cursor.moveToFirst()) {
a = cursor.getString(cursor.getColumnIndex("alarm"));
} else {
a = " ";
}
return a;
}
and this:
public String getAlarm(String al){
SQLiteDatabase db = this.getReadableDatabase(); // to get database in readable format
// Cursor cursor = db.rawQuery("select alarm from keywords where contact_number=? ",new String[]{al});
String a; //storing the required value
if (cursor.moveToFirst())
{
do {
a = cursor.getString(0);// each item will be stored
return a; // returning the required value
}
while(cursor.moveToNext());
}
and this:
public String getAlarm(String al){
SQLiteDatabase db = this.getReadableDatabase(); // to get database in readable format
Cursor cursor = db.rawQuery("select alarm from keywords where contact_number=? ",new String[]{al});
if (cursor != null) {
cursor.moveToFirst();
}
while (cursor.isAfterLast() == false) {
cursor.getString(1); // will fetch you the data
cursor.moveToNext();
}
cursor.close();
return " ";
}
My database file executes every query on the user table, but for the keywords table it only executes the insertkeys function and no other query. I don't understand whether the problem is with my query or with my table, so I am posting my database file too.
public class DatabaseHelper extends SQLiteOpenHelper {
// constructor
public DatabaseHelper(Context context) {
super(context, "Offline Mobile Finder.db", null, 2);
}
@Override
public void onCreate(SQLiteDatabase db) {
db.execSQL("Create Table user(username text , contact_number text primary key , password text , favourite_fruit text)");
db.execSQL("Create Table keywords( contact_number text primary key , alarm text , location text )");
}
@Override
public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
db.execSQL("drop table if exists user");
db.execSQL("drop table if exists keywords");
onCreate(db);
}
// data of user table
public boolean insert (String username,String contact_no ,String password ,String fav_fruit ){
SQLiteDatabase db = this.getWritableDatabase();
//ContentValues is a name value pair, used to insert or update values into database tables.
// ContentValues object will be passed to SQLiteDataBase objects insert() and update() functions.
// ContentValues contentValues = new ContentValues();
ContentValues contentValues = new ContentValues();
contentValues.put("username",username);
contentValues.put("contact_number",contact_no);
contentValues.put("password",password);
contentValues.put("favourite_fruit",fav_fruit);
long ins = db.insert("user",null,contentValues);
// db.close();
if(ins == -1)
return false;
else
return true;
}
// values insert in keywords table
public boolean insertkeys (String alarm ,String location ,String contact){
SQLiteDatabase db = this.getWritableDatabase();
//ContentValues is a name value pair, used to insert or update values into database tables.
// ContentValues object will be passed to SQLiteDataBase objects insert() and update() functions.
// ContentValues contentValues = new ContentValues();
ContentValues contentValues = new ContentValues();
contentValues.put("alarm",alarm);
contentValues.put("location",location);
contentValues.put("contact_number",contact);
long ins = db.insert("keywords",null,contentValues);
long upd = db.update("keywords",contentValues,"contact_number = ?",new String[]{contact});
// db.close();
if(ins == -1 && upd == -1)
return false;
else
return true;
}
public String getAlarm(String al) {
SQLiteDatabase db = this.getReadableDatabase(); // to get database in readable format
Cursor cursor = db.rawQuery("select alarm from keywords where contact_number=? ", new String[]{al});
String result = "";
if (cursor.moveToFirst()) {
result = cursor.getString(0);
}
cursor.close();
return result;
}
// checking if contact number exists in register activity
public boolean checkContactNo (String contact_no){
SQLiteDatabase db = this.getReadableDatabase();
//Cursors are what contain the result set of a query made against a database in Android.
// The Cursor class has an API that allows an app to read (in a type-safe manner) the columns
// that were returned from the query as well as iterate over the rows of the result set.
Cursor cursor = db.rawQuery("Select * from user where contact_number=?", new String[] {contact_no} );
if(cursor.getCount() > 0 )
return false;
else
return true;
}
public String getUsernameThroughContactNo(String contactNo){
SQLiteDatabase db = this.getReadableDatabase(); // to get database in readable format
Cursor cursor = db.rawQuery("select username from user where contact_number=?",new String[]{contactNo});
String a; //storing the required value
if (cursor.moveToFirst())
{
do {
a = cursor.getString(0);// each item will be stored
return a; // returning the required value
}
while(cursor.moveToNext());
}
return "";
}
}
I am trying to show the value of getAlarm() in another activity in a toast, but with every query I have tried it shows nothing in the toast. Even if I try to get contact_number from this table, it crashes.
I have searched a lot to solve the problem but didn't find a solution. I hope someone here will find the problem and give a solution. Thanks in advance for the help :)
In your 1st try, with this:
a = cursor.getString(cursor.getColumnIndex("balance"));
you try to get the value of the column "balance" instead of "alarm".
Your 2nd try looks ok, although you don't need the do loop since you return after you get the first value.
In your 3rd try the error is:
cursor.getString(1);
The index 1 is wrong, it should be 0 because the column indexes are 0 based.
But the problem is also that you do not save the value anywhere and finally you return " "!!! Why?
You should do something like:
public String getAlarm(String al){
SQLiteDatabase db = this.getReadableDatabase();
Cursor cursor = db.rawQuery("select alarm from keywords where contact_number=? ",new String[]{al});
String result = "";
if (cursor.moveToFirst()) {
result = cursor.getString(0);
}
cursor.close();
return result;
}
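And to close the loop on the original goal (showing the value in an EditText), a hypothetical usage from the activity could look like the snippet below; DatabaseHelper is the class from the question, while the view id and the contactNumber variable are placeholders of mine.
// Hypothetical Activity code; R.id.alarm_edit_text and contactNumber are assumptions.
DatabaseHelper dbHelper = new DatabaseHelper(this);
EditText alarmEditText = (EditText) findViewById(R.id.alarm_edit_text);
String alarm = dbHelper.getAlarm(contactNumber);
alarmEditText.setText(alarm);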

Spark 2 Dataframe Save to Hive - Compaction

I am using a Spark session to save a dataframe to a Hive table. The code is below.
df.write.mode(SaveMode.Append).format("orc").insertInto("table")
The data comes to Spark from Kafka, and it can be a huge amount of data arriving throughout the day. Does the Spark dataframe save internally perform Hive compaction? If not, what is the best way to do compaction at regular intervals without affecting the table insertions?
In your example you should add partitionBy, since the data can be huge:
df.write.mode(SaveMode.Append).format("orc").partitionBy("age")
Or you can also achieve it as below.
The way I have done this is to first register a temp table in the Spark job itself and then leverage the sql method of the HiveContext to create a new table in Hive using the data from the temp table. For example, if I have a dataframe df and a HiveContext hc, the general process is:
df.registerTempTable("my_temp_table")
hc.sql("Insert into overwrite table_name PARTITION SELECT a,b, PARTITION_col from my_temp_table")
public class HiveCompaction {
private static SparkConf sparkConf;
private static JavaSparkContext sc;
private static SparkSession sqlContext = springutil.getBean("testSparkSession");
private static HashMap<Object, Object> partitionColumns;
public static void compact(String table, Dataset<Row> dataToCompact) {
logger.info("Started Compaction for - " + table);
if (!partitionColumns.containsKey(table)) {
compact_table_without_partition(table, dataToCompact);
} else {
compact_table_with_partition(table, dataToCompact, partitionColumns);
}
logger.info("Data Overwritten in HIVE table : " + table + " successfully");
}
private static void compact_table_with_partition(String table, Dataset<Row> dataToCompact,
Map<Object, Object> partitionData) {
String[] partitions = ((String) partitionData.get(table)).split(",");
List<Map<Object, Object>> partitionMap = getPartitionsToCompact(dataToCompact, Arrays.asList(partitions));
for (Map mapper : partitionMap) {
// sqlContext.sql("REFRESH TABLE staging.dummy_table");
String query = "select * from " + table + " where " + frameQuery(" and ", mapper);
Dataset<Row> originalTable = sqlContext.sql(query.toString());
if (originalTable.count() == 0) {
dataToCompact.write().mode("append").format("parquet").insertInto(table);
} else {
String location = getHdfsFileLocation(table);
String uuid = getUUID();
updateTable(table, dataToCompact, originalTable, uuid);
String destinationPath = framePath(location, frameQuery("/", mapper), uuid);
sqlContext.sql("Alter table " + table + " partition(" + frameQuery(",", mapper) + ") set location '"
+ destinationPath + "'");
}
}
}
private static void compact_table_without_partition(String table, Dataset<Row> dataToCompact) {
String query = "select * from " + table;
Dataset<Row> originalTable = sqlContext.sql(query.toString());
if (originalTable.count() == 0) {
dataToCompact.write().mode("append").format("parquet").insertInto(table);
} else {
String location = getHdfsFileLocation(table);
String uuid = getUUID();
String destinationPath = framePath(location, null, uuid);
updateTable(table, dataToCompact, originalTable, uuid);
sqlContext.sql("Alter table " + table + " set location '" + destinationPath + "'");
}
}
private static void updateTable(String table, Dataset<Row> dataToCompact, Dataset<Row> originalTable, String uuid) {
Seq<String> joinColumnSeq = getPrimaryKeyColumns();
Dataset<Row> unModifiedRecords = originalTable.join(dataToCompact, joinColumnSeq, "leftanti");
Dataset<Row> dataToInsert1 = dataToCompact.withColumn("uuid", functions.lit(uuid));
Dataset<Row> dataToInsert2 = unModifiedRecords.withColumn("uuid", functions.lit(uuid));
dataToInsert1.write().mode("append").format("parquet").insertInto(table + "_compacted");
dataToInsert2.write().mode("append").format("parquet").insertInto(table + "_compacted");
}
private static String getHdfsFileLocation(String table) {
Dataset<Row> tableDescription = sqlContext.sql("describe formatted " + table + "_compacted");
List<Row> rows = tableDescription.collectAsList();
String location = null;
for (Row r : rows) {
if (r.get(0).equals("Location")) {
location = r.getString(1);
break;
}
}
return location;
}
private static String frameQuery(String delimiter, Map mapper) {
StringBuilder modifiedQuery = new StringBuilder();
int i = 1;
for (Object key : mapper.keySet()) {
modifiedQuery.append(key + "=");
modifiedQuery.append(mapper.get(key));
if (mapper.size() > i)
modifiedQuery.append(delimiter);
i++;
}
return modifiedQuery.toString();
}
private static String framePath(String location, String framedpartition, String uuid) {
StringBuilder loc = new StringBuilder(location);
loc.append("/");
if (StringUtils.isNotEmpty(framedpartition)) {
loc.append(framedpartition);
loc.append("/");
}
loc.append("uuid=");
loc.append(uuid);
logger.info(loc.toString());
return loc.toString();
}
public static Seq<String> getColumnSeq(List<String> joinColumns) {
List<String> cols = new ArrayList<>(joinColumns.size());
for (int i = 0; i < joinColumns.size(); i++) {
cols.add(joinColumns.get(i).toLowerCase());
}
return JavaConverters.asScalaBufferConverter(cols).asScala().readOnly();
}
private static String getUUID() {
StringBuilder uri = new StringBuilder();
Random rand = new Random();
int randNum = rand.nextInt(200);
String uuid = DateTimeFormatter.ofPattern("yyyyMMddHHmmSSS").format(LocalDateTime.now()).toString()
+ (String.valueOf(randNum));
return uuid;
}
private static List<Map<Object, Object>> getPartitionsToCompact(Dataset<Row> filteredRecords,
List<String> partitions) {
Column[] columns = new Column[partitions.size()];
int index = 0;
for (String c : partitions) {
columns[index] = new Column(c);
index++;
}
Dataset<Row> partitionsToCompact = filteredRecords.select(columns).distinct();
// TODO: add filter condition for selecting known partitions
JavaRDD<Map<Object, Object>> querywithPartitions = partitionsToCompact.toJavaRDD().map(row -> {
return convertRowToMap(row);
});
return querywithPartitions.collect();
}
private static Map<Object, Object> convertRowToMap(Row row) {
StructField[] fields = row.schema().fields();
List<StructField> structFields = Arrays.asList(fields);
Map<Object, Object> a = structFields.stream()
.collect(Collectors.toMap(e -> ((StructField) e).name(), e -> row.getAs(e.name())));
return a;
}
private static Seq<String> getPrimaryKeyColumns() {
ArrayList<String> primaryKeyColumns = new ArrayList<String>();
Seq<String> joinColumnSeq = getColumnSeq(primaryKeyColumns);
return joinColumnSeq;
}
/*
* public static void initSpark(String jobname) { sparkConf = new
* SparkConf().setAppName(jobname); sparkConf.setMaster("local[3]");
* sparkConf.set("spark.driver.allowMultipleContexts", "true"); sc = new
* JavaSparkContext(); sqlContext = new SQLContext(sc); }
*/
public static HashMap<Object, Object> getParitionColumns() {
HashMap<Object, Object> paritionColumns = new HashMap<Object, Object>();
paritionColumns.put((Object) "staging.dummy_table", "trade_date,dwh_business_date,region_cd");
return paritionColumns;
}
public static void initialize(String table) {
// initSpark("Hive Table Compaction -" + table);
partitionColumns = getParitionColumns();
}
}
Usage:
String table = "staging.dummy_table";
HiveCompaction.initialize(table);
Dataset<Row> dataToCompact = sparkSession.sql("select * from staging.dummy_table");
HiveCompaction.compact(table, dataToCompact);
sparkSession.sql("select * from staging.dummy_table_compacted").show();
System.out.println("Compaction successful");

Combining two hashmaps with the same key and same or different values

I have two hashmaps that I need to merge.
MAP 1 - [[LOCATION: United Kingdom], [PERSON: Alfred Theodore MacConkey], [ORGANIZATION: Royal Commission, United]]
MAP 2 -{LOCATION=[United Kingdom], ORGANIZATION=[Royal Commission], PERSON=[Alfred Theodore MacConkey]}
If I create Map 3 and do an addAll(), some values get overwritten; for example, in the case of ORGANIZATION I only get Royal Commission, and United gets overwritten. I wrote merge code for the two but get a NullPointerException. I just want to know if this approach is correct; I will debug and figure out why I am getting the exception.
public static LinkedHashMap<String,Vector<String>> merge(HashMap<String, Vector<String>> a, HashMap<String, Vector<String>> b) {
LinkedHashMap<String,Vector<String>> c = new LinkedHashMap<String,Vector<String>>();
Set<Entry<String,Vector<String>>> entriesA = a.entrySet();
Set<Entry<String,Vector<String>>> entriesB = b.entrySet();
for (Map.Entry<String, Vector<String>> entry : entriesA ) {
Vector<String> aValues = a.get(entry.getValue());
String aKey = entry.getKey();
Vector<String> allValues = entriesA.contains(aKey) ? a.get(aKey) : new Vector<String>();
allValues.addAll(aValues);
c.put(aKey, allValues);
}
for (Map.Entry<String, Vector<String>> entry : entriesB ) {
Vector<String> bValues = b.get(entry.getValue());
String bKey = entry.getKey();
if(c.containsKey(bKey) && c.get(bKey).equals(bValues) ) {
continue;
}
else if(c.containsKey(bKey) && !(c.get(bKey).equals(bValues))) {
c.put(bKey, bValues);
}
}
return c;
}
This line:
Vector<String> aValues = a.get(entry.getValue());
should be:
Vector<String> aValues = entry.getValue();
UPDATE:
Oh! and same goes for bValues
UPDATE 2:
Yet another issue: entriesA.contains(aKey) should be a.containsKey(aKey)
UPDATE 3:
Try something like this:
LinkedHashMap<String, Vector<String>> c = new LinkedHashMap<String, Vector<String>>();
for (Map.Entry<String, Vector<String>> entry : a.entrySet()) {
Vector<String> aValues = entry.getValue();
String aKey = entry.getKey();
c.put(aKey, new Vector<String>(aValues));
}
for (Map.Entry<String, Vector<String>> entry : b.entrySet()) {
Vector<String> bValues = entry.getValue();
String bKey = entry.getKey();
Vector<String> cValues = c.get(bKey);
if (cValues == null) {
c.put(bKey, new Vector<String>(bValues));
} else {
cValues.addAll(bValues);
}
}
return c;
UPDATE 4:
To avoid duplicate values, replace line:
cValues.addAll(bValues);
With:
Set<String> values = new HashSet<String>(cValues);
values.addAll(bValues);
cValues.clear();
cValues.addAll(values);
This will only deal with duplicates created by the merge though, not those that already existed.
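Putting UPDATE 3 and UPDATE 4 together, here is a self-contained sketch (the class name and the sample data are mine, and I use a LinkedHashSet so insertion order survives the de-duplication):
import java.util.*;

public class MapMerge {
    public static LinkedHashMap<String, Vector<String>> merge(
            Map<String, Vector<String>> a, Map<String, Vector<String>> b) {
        LinkedHashMap<String, Vector<String>> c = new LinkedHashMap<>();
        for (Map.Entry<String, Vector<String>> entry : a.entrySet()) {
            c.put(entry.getKey(), new Vector<>(entry.getValue()));
        }
        for (Map.Entry<String, Vector<String>> entry : b.entrySet()) {
            Vector<String> cValues = c.get(entry.getKey());
            if (cValues == null) {
                c.put(entry.getKey(), new Vector<>(entry.getValue()));
            } else {
                // de-duplicate while keeping insertion order
                Set<String> values = new LinkedHashSet<>(cValues);
                values.addAll(entry.getValue());
                cValues.clear();
                cValues.addAll(values);
            }
        }
        return c;
    }

    public static void main(String[] args) {
        HashMap<String, Vector<String>> a = new HashMap<>();
        a.put("ORGANIZATION", new Vector<>(Arrays.asList("Royal Commission", "United")));
        HashMap<String, Vector<String>> b = new HashMap<>();
        b.put("ORGANIZATION", new Vector<>(Arrays.asList("Royal Commission")));
        System.out.println(merge(a, b)); // prints {ORGANIZATION=[Royal Commission, United]}
    }
}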

With OrmLite, is there a way to automatically update table schema when my POCO is modified?

Can OrmLite recognize differences between my POCO and my schema and automatically add (or remove) columns as necessary to force the schema to remain in sync with my POCO?
If this ability doesn't exist, is there a way for me to query the db for the table schema so that I can manually perform the syncing? I found this, but I'm using the version of OrmLite that installs with ServiceStack and, for the life of me, I cannot find a namespace that has the TableInfo classes.
I created an extension method to automatically add missing columns to my tables. Been working great so far. Caveat: the code for getting the column names is SQL Server specific.
namespace System.Data
{
public static class IDbConnectionExtensions
{
private static List<string> GetColumnNames(IDbConnection db, string tableName)
{
var columns = new List<string>();
using (var cmd = db.CreateCommand())
{
cmd.CommandText = "exec sp_columns " + tableName;
var reader = cmd.ExecuteReader();
while (reader.Read())
{
var ordinal = reader.GetOrdinal("COLUMN_NAME");
columns.Add(reader.GetString(ordinal));
}
reader.Close();
}
return columns;
}
public static void AlterTable<T>(this IDbConnection db) where T : new()
{
var model = ModelDefinition<T>.Definition;
// just create the table if it doesn't already exist
if (db.TableExists(model.ModelName) == false)
{
db.CreateTable<T>(overwrite: false);
return;
}
// find each of the missing fields
var columns = GetColumnNames(db, model.ModelName);
var missing = ModelDefinition<T>.Definition.FieldDefinitions
.Where(field => columns.Contains(field.FieldName) == false)
.ToList();
// add a new column for each missing field
foreach (var field in missing)
{
var alterSql = string.Format("ALTER TABLE {0} ADD {1} {2}",
model.ModelName,
field.FieldName,
db.GetDialectProvider().GetColumnTypeDefinition(field.FieldType)
);
Console.WriteLine(alterSql);
db.ExecuteSql(alterSql);
}
}
}
}
No, there is currently no support for auto-migration of RDBMS schemas vs POCOs in ServiceStack's OrmLite.
There are currently a few threads being discussed in OrmLite's issues that are exploring the different ways to add this.
Here is a slightly modified version of the code from cornelha that works with PostgreSQL. I removed this fragment:
//private static List<string> GetColumnNames(object poco)
//{
// var list = new List<string>();
// foreach (var prop in poco.GetType().GetProperties())
// {
// list.Add(prop.Name);
// }
// return list;
//}
and used the IOrmLiteDialectProvider.NamingStrategy.GetTableName and IOrmLiteDialectProvider.NamingStrategy.GetColumnName methods to convert table and column names from PascalCase to the this_kind_of_notation that OrmLite uses when creating tables in PostgreSQL.
public static class IDbConnectionExtensions
{
private static List<string> GetColumnNames(IDbConnection db, string tableName, IOrmLiteDialectProvider provider)
{
var columns = new List<string>();
using (var cmd = db.CreateCommand())
{
cmd.CommandText = getCommandText(tableName, provider);
var tbl = new DataTable();
tbl.Load(cmd.ExecuteReader());
for (int i = 0; i < tbl.Columns.Count; i++)
{
columns.Add(tbl.Columns[i].ColumnName);
}
}
return columns;
}
private static string getCommandText(string tableName, IOrmLiteDialectProvider provider)
{
if (provider == PostgreSqlDialect.Provider)
return string.Format("select * from {0} limit 1", tableName);
else return string.Format("select top 1 * from {0}", tableName);
}
public static void AlterTable<T>(this IDbConnection db, IOrmLiteDialectProvider provider) where T : new()
{
var model = ModelDefinition<T>.Definition;
var table = new T();
var namingStrategy = provider.NamingStrategy;
// just create the table if it doesn't already exist
var tableName = namingStrategy.GetTableName(model.ModelName);
if (db.TableExists(tableName) == false)
{
db.CreateTable<T>(overwrite: false);
return;
}
// find each of the missing fields
var columns = GetColumnNames(db, model.ModelName, provider);
var missing = ModelDefinition<T>.Definition.FieldDefinitions
.Where(field => columns.Contains(namingStrategy.GetColumnName(field.FieldName)) == false)
.ToList();
// add a new column for each missing field
foreach (var field in missing)
{
var columnName = namingStrategy.GetColumnName(field.FieldName);
var alterSql = string.Format("ALTER TABLE {0} ADD COLUMN {1} {2}",
tableName,
columnName,
db.GetDialectProvider().GetColumnTypeDefinition(field.FieldType)
);
Console.WriteLine(alterSql);
db.ExecuteSql(alterSql);
}
}
}
I implemented an UpdateTable function. The basic idea is:
Rename current table on database.
Let OrmLite create the new schema.
Copy the relevant data from the old table to the new.
Drop the old table.
Github Repo: https://github.com/peheje/Extending-NServiceKit.OrmLite
Condensed code:
public interface ISqlProvider
{
string RenameTableSql(string currentName, string newName);
string GetColumnNamesSql(string tableName);
string InsertIntoSql(string intoTableName, string fromTableName, string commaSeparatedColumns);
string DropTableSql(string tableName);
}
public static void UpdateTable<T>(IDbConnection connection, ISqlProvider sqlProvider) where T : new()
{
connection.CreateTableIfNotExists<T>();
var model = ModelDefinition<T>.Definition;
string tableName = model.Name;
string tableNameTmp = tableName + "Tmp";
string renameTableSql = sqlProvider.RenameTableSql(tableName, tableNameTmp);
connection.ExecuteNonQuery(renameTableSql);
connection.CreateTable<T>();
string getModelColumnsSql = sqlProvider.GetColumnNamesSql(tableName);
var modelColumns = connection.SqlList<string>(getModelColumnsSql);
string getDbColumnsSql = sqlProvider.GetColumnNamesSql(tableNameTmp);
var dbColumns = connection.SqlList<string>(getDbColumnsSql);
List<string> activeFields = dbColumns.Where(dbColumn => modelColumns.Contains(dbColumn)).ToList();
string activeFieldsCommaSep = ListToCommaSeparatedString(activeFields);
string insertIntoSql = sqlProvider.InsertIntoSql(tableName, tableNameTmp, activeFieldsCommaSep);
connection.ExecuteSql(insertIntoSql);
string dropTableSql = sqlProvider.DropTableSql(tableNameTmp);
//connection.ExecuteSql(dropTableSql); //maybe you want to clean up yourself, else uncomment
}
private static String ListToCommaSeparatedString(List<String> source)
{
var sb = new StringBuilder();
for (int i = 0; i < source.Count; i++)
{
sb.Append(source[i]);
if (i < source.Count - 1)
{
sb.Append(", ");
}
}
return sb.ToString();
}
}
MySql implementation:
public class MySqlProvider : ISqlProvider
{
public string RenameTableSql(string currentName, string newName)
{
return "RENAME TABLE `" + currentName + "` TO `" + newName + "`;";
}
public string GetColumnNamesSql(string tableName)
{
return "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '" + tableName + "';";
}
public string InsertIntoSql(string intoTableName, string fromTableName, string commaSeparatedColumns)
{
return "INSERT INTO `" + intoTableName + "` (" + commaSeparatedColumns + ") SELECT " + commaSeparatedColumns + " FROM `" + fromTableName + "`;";
}
public string DropTableSql(string tableName)
{
return "DROP TABLE `" + tableName + "`;";
}
}
Usage:
using (var db = dbFactory.OpenDbConnection())
{
DbUpdate.UpdateTable<SimpleData>(db, new MySqlProvider());
}
Haven't tested with FKs. Can't handle renaming properties.
I needed to implement something similar and found the post by Scott very helpful. I decided to make a small change that makes it more database-agnostic. Since I only use Sqlite and MSSQL, I made the getCommandText method very simple, but it can be extended. I used a simple DataTable to get the columns. This solution works perfectly for my requirements.
public static class IDbConnectionExtensions
{
private static List<string> GetColumnNames(IDbConnection db, string tableName,IOrmLiteDialectProvider provider)
{
var columns = new List<string>();
using (var cmd = db.CreateCommand())
{
cmd.CommandText = getCommandText(tableName, provider);
var tbl = new DataTable();
tbl.Load(cmd.ExecuteReader());
for (int i = 0; i < tbl.Columns.Count; i++)
{
columns.Add(tbl.Columns[i].ColumnName);
}
}
return columns;
}
private static string getCommandText(string tableName, IOrmLiteDialectProvider provider)
{
if(provider == SqliteDialect.Provider)
return string.Format("select * from {0} limit 1", tableName);
else return string.Format("select top 1 * from {0}", tableName);
}
private static List<string> GetColumnNames(object poco)
{
var list = new List<string>();
foreach (var prop in poco.GetType().GetProperties())
{
list.Add(prop.Name);
}
return list;
}
public static void AlterTable<T>(this IDbConnection db, IOrmLiteDialectProvider provider) where T : new()
{
var model = ModelDefinition<T>.Definition;
var table = new T();
// just create the table if it doesn't already exist
if (db.TableExists(model.ModelName) == false)
{
db.CreateTable<T>(overwrite: false);
return;
}
// find each of the missing fields
var columns = GetColumnNames(db, model.ModelName,provider);
var missing = ModelDefinition<T>.Definition.FieldDefinitions
.Where(field => columns.Contains(field.FieldName) == false)
.ToList();
// add a new column for each missing field
foreach (var field in missing)
{
var alterSql = string.Format("ALTER TABLE {0} ADD {1} {2}",
model.ModelName,
field.FieldName,
db.GetDialectProvider().GetColumnTypeDefinition(field.FieldType)
);
Console.WriteLine(alterSql);
db.ExecuteSql(alterSql);
}
}
}
So I took user44's answer and modified the AlterTable method to make it a bit more efficient.
Instead of looping and running one SQL query per field/column, I merge them into one with some simple text parsing (MySQL commands!).
public static void AlterTable<T>(this IDbConnection db, IOrmLiteDialectProvider provider) where T : new()
{
var model = ModelDefinition<T>.Definition;
var table = new T();
var namingStrategy = provider.NamingStrategy;
// just create the table if it doesn't already exist
var tableName = namingStrategy.GetTableName(model.ModelName);
if (db.TableExists(tableName) == false)
{
db.CreateTable<T>(overwrite: false);
return;
}
// find each of the missing fields
var columns = GetColumnNames(db, model.ModelName, provider);
var missing = ModelDefinition<T>.Definition.FieldDefinitions
.Where(field => columns.Contains(namingStrategy.GetColumnName(field.FieldName)) == false)
.ToList();
string alterSql = "";
string addSql = "";
// add a new column for each missing field
foreach (var field in missing)
{
var alt = db.GetDialectProvider().ToAddColumnStatement(typeof(T), field); // Should be made more efficient, one query for all changes instead of many
int index = alt.IndexOf("ADD ");
alterSql = alt.Substring(0, index);
addSql += alt.Substring(alt.IndexOf("ADD COLUMN")).Replace(";", "") + ", ";
}
if (addSql.Length > 2)
addSql = addSql.Substring(0, addSql.Length - 2);
string fullSql = alterSql + addSql;
Console.WriteLine(fullSql);
db.ExecuteSql(fullSql);
}

Cassandra Batch_mutate()

Please provide an example of how to work with batch_mutate() in C#.NET.
Thanks in advance.
Dictionary <string, Dictionary<string, List<Mutation>>> dictionary = new Dictionary<string, Dictionary<string, List<Mutation>>>();
List<Mutation> columnsToadd = new List<Mutation>();
List<Column> customers = new List<Column>();
//Columns that will be placed inside the SuperColumn
customers.Add(new Column() { Name = utf8Encoding.GetBytes("street"), Timestamp = timeStamp, Value = utf8Encoding.GetBytes("Test") });
customers.Add(new Column() { Name = utf8Encoding.GetBytes("Zip"), Timestamp = timeStamp, Value = utf8Encoding.GetBytes("Test") });
customers.Add(new Column() { Name = utf8Encoding.GetBytes("city"), Timestamp = timeStamp, Value = utf8Encoding.GetBytes("Test Hills") });
Dictionary<string, List<Mutation>> innerMap = new Dictionary<string, List<Mutation>>();
Mutation columns = new Mutation()
{
Column_or_supercolumn = new ColumnOrSuperColumn() { Super_column = new SuperColumn() { Name = utf8Encoding.GetBytes("John1"), Columns = customers } }
};
columnsToadd.Add(columns);
ColumnPath nameColumnPath = new ColumnPath()
{
Column_family = "Super1",
Super_column = utf8Encoding.GetBytes("John1"),
Column = utf8Encoding.GetBytes("customers")
};
innerMap.Add("Super1", columnsToadd);
dictionary.Add("Phatduckk", innerMap);
client.batch_mutate("Keyspace1", dictionary, ConsistencyLevel.ONE);
