With the native Amazon .NET library, a BatchGet looks like this:
var batch = context.CreateBatchGet<MyClass>();
batch.AddKey("hashkey1");
batch.AddKey("hashkey2");
batch.AddKey("hashkey3");
batch.Execute();
var result = batch.Results;
Now I'm trying ServiceStack.Aws, but I couldn't find how to do the same thing. I've tried the following; both attempts failed.
//1st try
var q1 = db.FromQueryIndex<MyClass>(x => x.room_id == "hashkey1" || x.room_id == "hashkey2"||x.room_id == "hashkey3");
var result = db.Query(q1);
//2nd try
var result = db.GetItems<MyClass>(new string[]{"hashkey1","hashkey2","hashkey3"});
In both cases it threw an exception saying:
Additional information: Invalid operator used in KeyConditionExpression: OR
Please help me. Thanks!
Using GetItems should work as seen with this Live Example on Gistlyn:
public class MyClass
{
public string Id { get; set; }
public string Content { get; set; }
}
db.RegisterTable<MyClass>();
db.DeleteTable<MyClass>(); // Delete existing MyClass Table (if any)
db.InitSchema(); // Creates MyClass DynamoDB Table
var items = 5.Times(i => new MyClass { Id = $"hashkey{i}", Content = $"Content {i}" });
db.PutItems(items);
var dbItems = db.GetItems<MyClass>(new[]{ "hashkey1","hashkey2","hashkey3" });
"Saved Items: {0}".Print(dbItems.Dump());
If your Item has both a Hash and a Range Key you'll need to use the GetItems<T>(IEnumerable<DynamoId> ids) API, e.g.:
var dbItems = db.GetItems<MyClass>(new[]{
new DynamoId("hashkey1","rangekey1"),
new DynamoId("hashkey2","rangekey3"),
new DynamoId("hashkey3","rangekey4"),
});
Query all Items with same HashKey
If you want to fetch all items with the same HashKey you need to create a DynamoDB Query as seen with this Live Gistlyn Example:
var items = 5.Times(i => new MyClass {
Id = $"hashkey{i%2}", RangeKey = $"rangekey{i}", Content = $"Content {i}" });
db.PutItems(items);
var rows = db.FromQuery<MyClass>(x => x.Id == "hashkey1").Exec().ToArray();
rows.PrintDump();
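Note that the query example assumes MyClass also declares a RangeKey property. A possible declaration is sketched below; the [RangeKey] attribute name is an assumption based on PocoDynamo's conventions, so verify it against your ServiceStack.Aws version:
public class MyClass
{
    public string Id { get; set; }        // hash key (PocoDynamo convention: Id)

    [RangeKey]                            // assumed attribute; check your ServiceStack.Aws version
    public string RangeKey { get; set; }

    public string Content { get; set; }
}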
In the Opportunity screen, the data view for Relations is defined simply as:
public CRRelationsList<CROpportunity.noteID> Relations;
When a Sales Order is raised from the Opportunity, I'd like to display the Relations defined on the source Opportunity in another tab, and I'm struggling with how to write the data view and pass the Opportunity NoteID.
public CRRelationsList<???> Relations;
Thanks!
The generic type in data views often resolves to the current record.
In the CRRelationsList class the generic type is named TNoteField:
public class CRRelationsList<TNoteField> : PXSelect<CRRelation>
where TNoteField : IBqlField
Assuming the data view is declared as CRRelationsList<CROpportunity.noteID>, the generic type value will be resolved like this: Caches[typeof(CROpportunity)].Current.NoteID.
protected virtual void CRRelation_RefNoteID_FieldDefaulting(PXCache sender, PXFieldDefaultingEventArgs e)
{
// Get a cache object of type CROpportunity
var refCache = sender.Graph.Caches[BqlCommand.GetItemType(typeof(TNoteField))];
// Get the NoteID field value of the current CROpportunity object
e.NewValue = refCache.GetValue(refCache.Current, typeof(TNoteField).Name);
}
So to set DAC.Field of CRRelationsList<DAC.field> you would do:
// In a graph extension (PXGraphExtension)
Base.Caches[typeof(DAC)].Current.Field = ???;
// Or in a graph (PXGraph)
Caches[typeof(DAC)].Current.Field = ???;
If the current DAC object is null, you need to insert a record in the data view or directly in the cache object.
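For example, a minimal sketch (assuming a PXGraphExtension on the target graph; the DAC used here is illustrative) of making sure a Current record exists before the view is used:
// Ensure the cache backing the view's generic type has a Current record
var cache = Base.Caches[typeof(CROpportunity)];
if (cache.Current == null)
{
    // Insert() creates a new record and makes it the cache's Current;
    // alternatively point Current at an existing record: cache.Current = existingOpportunity;
    cache.Insert();
}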
I'm not sure re-using CRRelationsList is the best approach if you simply want to display records, because it does much more than that. It should be possible to extract the select request out of it and directly substitute the TNoteField value:
private static PXSelectDelegate GetHandler()
{
return () =>
{
var command = new Select2<CRRelation,
LeftJoin<BAccount, On<BAccount.bAccountID, Equal<CRRelation.entityID>>,
LeftJoin<Contact,
On<Contact.contactID, Equal<Switch<Case<Where<BAccount.type, Equal<BAccountType.employeeType>>, BAccount.defContactID>, CRRelation.contactID>>>,
LeftJoin<Users, On<Users.pKID, Equal<Contact.userID>>>>>,
Where<CRRelation.refNoteID, Equal<Current<TNoteField>>>>();
var startRow = PXView.StartRow;
int totalRows = 0;
var list = new PXView(PXView.CurrentGraph, false, command).
Select(null, null, PXView.Searches, PXView.SortColumns, PXView.Descendings, PXView.Filters,
ref startRow, PXView.MaximumRows, ref totalRows);
PXView.StartRow = 0;
foreach (PXResult<CRRelation, BAccount, Contact, Users> row in list)
{
var relation = (CRRelation)row[typeof(CRRelation)];
var account = (BAccount)row[typeof(BAccount)];
relation.Name = account.AcctName;
relation.EntityCD = account.AcctCD;
var contact = (Contact)row[typeof(Contact)];
if (contact.ContactID == null && relation.ContactID != null &&
account.Type != BAccountType.EmployeeType)
{
var directContact = (Contact)PXSelect<Contact>.
Search<Contact.contactID>(PXView.CurrentGraph, relation.ContactID);
if (directContact != null) contact = directContact;
}
relation.Email = contact.EMail;
var user = (Users)row[typeof(Users)];
if (account.Type != BAccountType.EmployeeType)
relation.ContactName = contact.DisplayName;
else
{
if (string.IsNullOrEmpty(relation.Name))
relation.Name = user.FullName;
if (string.IsNullOrEmpty(relation.Email))
relation.Email = user.Email;
}
}
return list;
};
}
AutoMapper v4.0 was very straightforward to use within a method. Can someone help rewrite this for v5.0, please (specifically the Mapper code)?
public IEnumerable<NotificationDto> GetNewNotifications()
{
var userId = User.Identity.GetUserId();
var notifications = _context.UserNotifications
.Where(un => un.UserId == userId && !un.IsRead)
.Select(un => un.Notification)
.Include(n => n.Gig.Artist)
.ToList();
Mapper.CreateMap<ApplicationUser, UserDto>();
Mapper.CreateMap<Gig, GigDto>();
Mapper.CreateMap<Notification, NotificationDto>();
return notifications.Select(Mapper.Map<Notification, NotificationDto>);
}
UPDATE:
It seems that EF Core doesn't project what AutoMapper is mapping with:
return notifications.Select(Mapper.Map<Notification, NotificationDto>);
But I do get results in Postman with the following code:
return notifications.Select(n => new NotificationDto()
{
DateTime = n.DateTime,
Gig = new GigDto()
{
Artist = new UserDto()
{
Id = n.Gig.Artist.Id,
Name = n.Gig.Artist.Name
},
DateTime = n.Gig.DateTime,
Id = n.Gig.Id,
IsCancelled = n.Gig.IsCancelled,
Venue = n.Gig.Venue
},
OriginalVenue = n.OriginalVenue,
OriginalDateTime = n.OriginalDateTime,
Type = n.Type
});
If you want to keep using the static instance, the only change is in the mapper initialization:
Mapper.Initialize(cfg =>
{
cfg.CreateMap<ApplicationUser, UserDto>();
cfg.CreateMap<Gig, GigDto>();
cfg.CreateMap<Notification, NotificationDto>();
});
Also, you should run this code only once per AppDomain (somewhere on startup, for example) and not every time you call GetNewNotifications.
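For example, a minimal sketch of doing that in ASP.NET Core (assumed here because of the EF Core mention; move it to whatever startup hook your host actually uses):
using AutoMapper;
using Microsoft.Extensions.DependencyInjection;

public class Startup
{
    public void ConfigureServices(IServiceCollection services)
    {
        // Runs once at application startup; GetNewNotifications then only calls Mapper.Map
        Mapper.Initialize(cfg =>
        {
            cfg.CreateMap<ApplicationUser, UserDto>();
            cfg.CreateMap<Gig, GigDto>();
            cfg.CreateMap<Notification, NotificationDto>();
        });

        // ...the rest of your service registrations
    }
}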
I know that ServiceStack.OrmLite is set up as a 1:1 mapping between a POCO and a database table. I have a situation where there will be groups of tables with the same structure, created as necessary. I am trying to find a way to keep using the IDbConnection but specify the table name in CRUD operations.
Something like this:
using(var db = _conn.OpenDbConnection()){
db.SaveAll(objList, "DIFFERENT_TABLE");
}
I was easily able to work around creating and deleting the tables. I am hoping I can make use of the ExpressionVisitor or something else to help change the table name before the SQL is executed. One of the requirements of the project is that it be database agnostic, which is why I am trying not to write the SQL manually.
Solutions
Here are a couple of functions that I ended up creating, if anyone out there wants some more examples:
public static List<T> SelectTable<T>(this IDbConnection conn, string tableName) {
var stmt = ModelDefinition<T>.Definition.SqlSelectAllFromTable;
stmt = stmt.Replace(ModelDefinition<T>.Definition.Name, tableName.FmtTable());
return conn.Select<T>(stmt);
}
public static List<T> SelectTableFmt<T>(this IDbConnection conn, string tableName, string sqlFilter,
params object[] filterParams) {
var stmt = conn.GetDialectProvider().ToSelectStatement(typeof (T), sqlFilter, filterParams);
stmt = stmt.Replace(ModelDefinition<T>.Definition.Name, tableName.FmtTable());
return conn.Select<T>(stmt);
}
public static void InsertTable<T>(this IDbConnection conn, T obj, string tablename) {
var stmt = conn.GetDialectProvider().ToInsertRowStatement(null, obj);
stmt = stmt.Replace(obj.GetType().Name, tablename.FmtTable());
conn.ExecuteSql(stmt);
}
public static int SaveAll<T>(this IDbConnection conn, string tablename, IEnumerable<T> objs) {
var saveRows = objs.ToList();
var firstRow = saveRows.FirstOrDefault();
if (Equals(firstRow, default(T))) return 0;
var defaultIdValue = firstRow.GetId().GetType().GetDefaultValue();
var idMap = defaultIdValue != null
? saveRows.Where(x => !defaultIdValue.Equals(x.GetId())).ToSafeDictionary(x => x.GetId())
: saveRows.Where(x => x.GetId() != null).ToSafeDictionary(x => x.GetId());
var existingRowsMap = conn.SelectByIds<T>(tablename, idMap.Keys).ToDictionary(x => x.GetId());
var modelDef = ModelDefinition<T>.Definition;
var dialectProvider = conn.GetDialectProvider();
var rowsAdded = 0;
using (var dbTrans = conn.OpenTransaction()) {
foreach (var obj in saveRows) {
var id = obj.GetId();
if (id != defaultIdValue && existingRowsMap.ContainsKey(id)) {
var updStmt = dialectProvider.ToUpdateRowStatement(obj);
updStmt = updStmt.Replace(obj.GetType().Name, tablename.FmtTable());
conn.ExecuteSql(updStmt);
}
else {
if (modelDef.HasAutoIncrementId) {} // note: auto-increment ids are not handled by this helper
var stmt = dialectProvider.ToInsertRowStatement(null, obj);
stmt = stmt.Replace(obj.GetType().Name, tablename.FmtTable());
conn.ExecuteSql(stmt);
rowsAdded++;
}
}
dbTrans.Commit();
}
return rowsAdded;
}
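A quick usage sketch of the helpers above (the RoomEvent POCO and the table name are placeholders for whatever your template table maps to):
public class RoomEvent
{
    public int Id { get; set; }
    public string Name { get; set; }
}

// elsewhere, given an OrmLite connection factory _conn:
using (var db = _conn.OpenDbConnection())
{
    // Insert into a table that shares RoomEvent's structure but not its name
    db.InsertTable(new RoomEvent { Id = 1, Name = "first" }, "ROOM_EVENTS_2016");

    // Read everything back from that table, typed as RoomEvent
    var rows = db.SelectTable<RoomEvent>("ROOM_EVENTS_2016");

    // Insert-or-update the whole list against the same table
    db.SaveAll("ROOM_EVENTS_2016", rows);
}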
OrmLite supports specifying the table name for Update and Delete operations. Unfortunately the examples in the readme here have yet to be updated. This is the required format:
UPDATE:
db.UpdateFmt(table: "Person", set: "FirstName = {0}".Fmt("JJ"), where: "LastName = {0}".Fmt("Hendrix"));
DELETE:
db.DeleteFmt(table: "Person", where: "Age = {0}".Fmt(27));
The methods you need can be found here. You should be able to use .Exec to handle reading and insert operations.
I want to configure my MongoDB to create a sequence number for an Id field. For example, it has to start from 1001 and increase by 1 automatically when I insert the next document. I have my schema definitions as part of Node.js; how do I add this configuration to the Node schema?
MongoDB doesn't support this out of the box. The way I've implemented it (albeit in C#) is to create a "Sequence" collection with a key and a next number. You can atomically increment and return the next number, then use it as the Id in your collection.
This is a C# function, using the MongoDB findAndModify command to fetch and update the sequence number for a given key.
public long GetNextSequenceNumber(string name, string key)
{
var update = new BsonDocument(new BsonElement("$inc", new BsonDocument(new BsonElement("SequenceNumber", 1))));
var query = new BsonDocument("_id", key);
var command = new CommandDocument {
{ "findandmodify" , name },
{ "query", query},
{ "update" , update},
{ "new" , true},
};
var res = Db.RunCommand(command);
if (res.Response["value"] != BsonNull.Value)
{
var o = BsonSerializer.Deserialize<Sequence>(res.Response["value"].ToBsonDocument());
return o.SequenceNumber;
}
else
{
var o = new Sequence() { Id = key, SequenceNumber = 0 };
Db.GetCollection(name).Insert<Sequence>(o);
return o.SequenceNumber;
}
}
and the Sequence model:
public class Sequence
{
public string Id { get; set; }
public long SequenceNumber { get; set; }
}
The sequence documents look like:
{
_id : 'mykey',
SequenceNumber : NumberLong(1234)
}
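For example, to have the generated numbers start at 1001, one option (a sketch; the "Sequences" collection name and "orders" key are illustrative) is to seed the counter at 1000 so the first atomic increment returns 1001:
// Seed once, e.g. in a setup/migration step
Db.GetCollection("Sequences").Insert<Sequence>(new Sequence { Id = "orders", SequenceNumber = 1000 });

// Then for every new document:
long nextId = GetNextSequenceNumber("Sequences", "orders"); // 1001, 1002, ...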
If you need it converted to JavaScript, please ask.
Hope that helps.
I have been working on a search using SolrNet, which is working the way I want. But I would like some advice on the best way to pass my query parameters from my web page into SolrNet.
What I would ideally like to do is pass my query string parameters similar to how this site does it: http://www.watchfinder.co.uk/SearchResults.aspx?q=%3a&f_brand=Rolex&f_bracelets=Steel&f_movements=Automatic.
As you can see from the site's query string, it looks like it is being passed into SolrNet directly. Here is how I am doing it at the moment (facet query segment):
public class SoftwareSalesSearcher
{
public static SoftwareSalesSearchResults Facet()
{
ISolrOperations solr = SolrOperationsCache.GetSolrOperations(ConfigurationManager.AppSettings["SolrUrl"]);
//Iterate through querystring to get the required fields to query Solrnet
List<ISolrQuery> queryCollection = new List<ISolrQuery>();
foreach (string key in HttpContext.Current.Request.QueryString.Keys)
{
queryCollection.Add(new SolrQuery(String.Format("{0}:{1}", key, HttpContext.Current.Request.QueryString[key])));
}
var lessThan25 = new SolrQueryByRange("SoftwareSales", 0m, 25m);
var moreThan25 = new SolrQueryByRange("SoftwareSales", 26m, 50m);
var moreThan50 = new SolrQueryByRange("SoftwareSales", 51m, 75m);
var moreThan75 = new SolrQueryByRange("SoftwareSales", 76m, 100m);
QueryOptions options = new QueryOptions
{
Rows = 0,
Facet = new FacetParameters {
Queries = new[] { new SolrFacetQuery(lessThan25), new SolrFacetQuery(moreThan25), new SolrFacetQuery(moreThan50), new SolrFacetQuery(moreThan75) }
},
FilterQueries = queryCollection.ToArray()
};
var results = solr.Query(SolrQuery.All, options);
var searchResults = new SoftwareSalesSearchResults();
List<SoftwareSalesFacetDetail> softwareSalesInformation = new List<SoftwareSalesFacetDetail>();
foreach (var facet in results.FacetQueries)
{
if (facet.Value != 0)
{
SoftwareSalesFacetDetail salesItem = new SoftwareSalesFacetDetail();
salesItem.Price = facet.Key;
salesItem.Value = facet.Value;
softwareSalesInformation.Add(salesItem);
}
}
searchResults.Results = softwareSalesInformation;
searchResults.TotalResults = results.NumFound;
searchResults.QueryTime = results.Header.QTime;
return searchResults;
}
}
At the moment I can't see how I can query all my results from my current code by adding the following query string: q=*:*.
I'm not sure what you mean by "parameters being passed into SolrNet directly". It seems that watchfinder is using some variant of the model binder included in the SolrNet sample app.
Also take a look at the controller in the sample app to see how the SolrNet parameters are built.
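For reference, a rough sketch (not the sample app's model binder; the "f_" prefix convention is just an assumption modelled on the watchfinder-style URL) of turning such query-string parameters into SolrNet filter queries, using SolrQueryByField so the values are escaped for you:
using System.Collections.Generic;
using System.Collections.Specialized;
using System.Linq;
using SolrNet;

public static class QueryStringFilters
{
    // Turns ?f_brand=Rolex&f_bracelets=Steel into brand:Rolex / bracelets:Steel filter queries
    public static ICollection<ISolrQuery> ToFilterQueries(NameValueCollection queryString)
    {
        return queryString.AllKeys
            .Where(k => k != null && k.StartsWith("f_"))
            .Select(k => (ISolrQuery)new SolrQueryByField(k.Substring(2), queryString[k]))
            .ToList();
    }
}
The result can be assigned to QueryOptions.FilterQueries in place of the hand-built queryCollection, while SolrQuery.All (i.e. *:*) remains the main query.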