I have a table in DynamoDB that stores some data. The partition key (username) stores the Cognito user name, and the sort key ("Code") is supplied by the user; together these make up the primary key.
I want to query a single item in DynamoDB that matches the primary key.
Here is my Lambda function:
const AWS = require('aws-sdk');
const dynamodb = new AWS.DynamoDB();

exports.handler = (event, context, callback) => {
    const type = event.type;
    if (type === 'all') {
        const params = {
            TableName: 'test-record'
        };
        dynamodb.scan(params, function (err, data) {
            if (err) {
                console.log(err);
                callback(err);
            } else {
                console.log(data);
                const items = data.Items.map(
                    (dataField) => {
                        return {
                            Date: dataField.Date.S,
                            Code: dataField.Code.S,
                            TelNumber: +dataField.TelNumber.N,
                            NameOfPerson: dataField.NameOfPerson.S,
                            FileNum: dataField.FileNum.S,
                            Purpose: dataField.Purpose.S,
                        };
                    }
                );
                callback(null, items);
            }
        });
    } else if (type === 'single') {
        const params = {
            Key: {
                "username": {
                    S: dataField.username.S,
                },
                "Code": {
                    S: dataField.Code.S
                }
            },
            TableName: "test-record"
        };
        dynamodb.getItem(params, function (err, data) {
            if (err) {
                console.log(err);
                callback(err);
            } else {
                console.log(data);
                callback(null, data);
            }
        });
    } else {
        callback('Something went wrong');
    }
};
If I hardcode "username" and "Code" in this function, it works and returns the correct data, but running the code pasted above gives me this error:
{ "errorMessage": "RequestId: c410e493-b774-11e7-a528-37d94b51b57b Process exited before completing request" }
Any ideas how to get items from DynamoDB without having to hard-code the keys into the Lambda?
One thing: I don't see how dataField is being initialized in the if (type == 'single') branch, since it only exists inside the map callback above...
But aside from that, I think you should try using Expression Attribute Names and Expression Attribute Values. I haven't tested this with getItem, but it works for query:
I built the variable used to query the table from my event body, like so:
const queryVariable = JSON.parse(e.body);
Then I used Expression Attribute Names and Expression Attribute Values in my params to make the query:
var params = {
    TableName: 'tablename',
    KeyConditionExpression: '#hkn = :qv and begins_with(#sk, :skv)',
    ExpressionAttributeNames: {
        "#hkn": "Hash_Key_Name",
        "#sk": "SortKeyName"
    },
    ExpressionAttributeValues: {
        ":qv": queryVariable,
        ":skv": sortKeyValue
    }
};
Then you can make your call with docClient.query(params, function(err, data) {...}), and so on. Maybe you can use this instead of getItem: if you don't need to grab exactly one item and want to query your table for variable keys, try this approach.
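For the original getItem case, here is a minimal sketch of reading the key values from the invoking event instead of hardcoding them. It assumes the event carries username and Code fields (adjust to your actual payload) and uses the DocumentClient, so no { S: ... } type descriptors are needed:

const AWS = require('aws-sdk');
const docClient = new AWS.DynamoDB.DocumentClient();

exports.handler = (event, context, callback) => {
    // Key values come from the event payload rather than being hardcoded.
    const params = {
        TableName: 'test-record',
        Key: {
            username: event.username, // partition key
            Code: event.Code          // sort key
        }
    };
    docClient.get(params, function (err, data) {
        if (err) {
            console.log(err);
            callback(err);
        } else {
            callback(null, data.Item); // data.Item is undefined if no match
        }
    });
};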
A very simple Lambda function I've written to get data from DDB into a React Native application goes like this:
const AWS = require('aws-sdk');
const docClient = new AWS.DynamoDB.DocumentClient({region: 'us-west-2'});

exports.handle = function(e, ctx, cb) {
    var params = { TableName: 'TableName' };
    docClient.scan(params, function(err, data) {
        if (err) {
            cb(err, null);
        } else {
            cb(null, data.Items);
        }
    });
}
Make sure the role used to execute the Lambda function includes DynamoDB read access (at least).
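For example, a minimal read-only policy statement might look like this (a sketch; the table name is taken from the question, and you should scope the Resource ARN to your own account and region):

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["dynamodb:GetItem", "dynamodb:Query", "dynamodb:Scan"],
            "Resource": "arn:aws:dynamodb:*:*:table/test-record"
        }
    ]
}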
My project is a TypeScript project that is started through a gulpfile.
In the gulpfile I use Express, and I try to access req.originalUrl, where req is the request object.
It throws the error Property 'originalUrl' does not exist on type 'Request<ParamsDictionary, any, any, ParsedQs, Record<string, any>>'.
I have tried the following to no avail:
Add typeRoots to tsconfig and custom typings in /src/types/express/index.d.ts
Install @types/express-serve-static-core
I have noticed that req['originalUrl'] works, but I am not supposed to change this file.
Below is my package.json:
"dependencies": {
"#types/express-serve-static-core": "^4.17.31",
"#types/fs-extra": "^8.0.1",
"#types/gulp": "^4.0.6",
"#types/node": "^13.13.48",
"#types/sass": "^1.16.0",
"cors": "^2.8.5",
"express": "^4.17.1",
"express-http-proxy": "^1.6.2",
"fs-extra": "^8.1.0",
"gulp": "^4.0.2",
"jest": "^26.4.0",
"mustache": "^4.2.0",
"portfinder": "^1.0.28",
"sass": "^1.32.8",
"ts-node": "^8.6.2",
"typescript": "^3.9.9"
},
"devDependencies": {
"#types/express": "^4.17.11",
"#types/express-http-proxy": "^1.6.1",
"#types/jest": "^26.0.22",
"#types/jsdom": "^16.2.9",
"#types/mocha": "^8.2.2"
}
Can someone help me with this?
The problem seems to be with the version of the typescript package.
Using "typescript": "^4.6.4" works.
I am running the application with this command:
./spark-submit --master yarn --deploy-mode cluster --packages org.mongodb.spark:mongo-spark-connector_2.11:2.2.0 --class com.mmi.process.app.StartApp ./SparkFileStreaming.jar
But it stops automatically after processing the HDFS file. Please help me understand why this is happening.
My code is below:
SparkConf sparkConf = new SparkConf().setAppName("MyWordCount");
sparkConf.set("spark.streaming.receiver.writeAheadLog.enable", "true");

JavaSparkContext sc = new JavaSparkContext(sparkConf);
JavaStreamingContext streamingContext = new JavaStreamingContext(sc, Durations.seconds(60));
streamingContext.checkpoint("hdfs://Mongo1:9000/probeAnalysis/checkpoint");

String filePath = "hdfs://Master:9000/mmi_traffic/listenerTransaction/2020/*/*/*/";
JavaDStream<String> dStream = streamingContext.textFileStream(filePath).cache();

dStream.foreachRDD((rdd) -> {
    try {
        Long startTime = System.currentTimeMillis();
        long total = rdd.count();
        if (total > 0) {
            String filePath1 = Utils.getFilePath(rdd);
            SparkSession sparkSession = JavaSparkSessionSingleton.getInstance();
            Dataset<Row> jsonDataset = sparkSession.read().json(filePath1).cache();
            jsonDataset.createOrReplaceTempView("mmi_probes");
            Dataset<Row> probAnalysisData = sparkSession.sql("select sourceId, areaCode, vehicleType, "
                + "count(distinct deviceId) as totalDevices, "
                + "sum(valid+duplicate+history+future) as total, sum(valid) as valid, "
                + "sum(duplicate) as duplicate, sum(history) as history, sum(future) as future, "
                + "sum(count_0_2) as 0_2, sum(count_2_5) as 2_5, sum(count_5_10) as 5_10, "
                + "sum(count_10_15) as 10_15"
                + " from mmi_probes group by sourceId, areaCode, vehicleType");
            MongoSpark.write(probAnalysisData)
                .option("spark.mongodb.output.uri", "mongodb://Mongo-server:27017/mmi_traffic.mmi_test_all")
                .mode("append")
                .save();
            sparkSession.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
});

streamingContext.start();
streamingContext.awaitTermination();
streamingContext.close();
I have tried to add the Current Balance (Currency) field to the header area of the "My Documents (SP.40.20.00)" page of the customer portal, but those fields do not show up no matter what I do, and overriding the aggregate function doesn't seem possible (wrapper error). Is there any other way for me to get the customer currency total instead of the default base currency total?
Link to the project
The base graph you're working on (ARDocumentEnq) has visibility validations in a RowSelected event, based on the feature set enabled by the license and on the selected row's CuryID versus the company base currency.
Check that those features are enabled on the main site under Configuration -> Common Settings -> Licensing -> Enable/Disable Features, and work out the conditions you need for row.CuryID and Company.Current.BaseCuryID. To override the base graph conditions, you can add a RowSelected event in a graph extension:
public class ARDocumentEnq_Extension : PXGraphExtension<ARDocumentEnq>
{
    public virtual void ARDocumentFilter_RowSelected(PXCache cache, PXRowSelectedEventArgs e)
    {
        ARDocumentEnq.ARDocumentFilter row = (ARDocumentEnq.ARDocumentFilter)e.Row;
        if (row == null) return;

        PXCache docCache = Base.Documents.Cache;

        // Forcing display
        bool byPeriod = true; //(row.Period != null);
        bool isMCFeatureInstalled = true; //PXAccess.FeatureInstalled<FeaturesSet.multicurrency>();
        bool isForeignCurrencySelected = true; //String.IsNullOrEmpty(row.CuryID) == false && (row.CuryID != this.Company.Current.BaseCuryID);
        bool isBaseCurrencySelected = true; //String.IsNullOrEmpty(row.CuryID) == false && (row.CuryID == this.Company.Current.BaseCuryID);

        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentFilter.curyID>(cache, row, isMCFeatureInstalled);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentFilter.curyBalanceSummary>(cache, row, isMCFeatureInstalled && isForeignCurrencySelected);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentFilter.curyDifference>(cache, row, isMCFeatureInstalled && isForeignCurrencySelected);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentFilter.curyCustomerBalance>(cache, row, isMCFeatureInstalled && isForeignCurrencySelected);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentFilter.curyCustomerDepositsBalance>(cache, row, isMCFeatureInstalled && isForeignCurrencySelected);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentResult.curyID>(docCache, null, isMCFeatureInstalled);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentResult.rGOLAmt>(docCache, null, isMCFeatureInstalled && !isBaseCurrencySelected);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentResult.curyBegBalance>(docCache, null, byPeriod && isMCFeatureInstalled && !isBaseCurrencySelected);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentResult.begBalance>(docCache, null, byPeriod);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentResult.curyOrigDocAmt>(docCache, null, isMCFeatureInstalled && !isBaseCurrencySelected);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentResult.curyDocBal>(docCache, null, isMCFeatureInstalled && !isBaseCurrencySelected);
        PXUIFieldAttribute.SetVisible<ARDocumentEnq.ARDocumentResult.curyDiscActTaken>(docCache, null, isMCFeatureInstalled && !isBaseCurrencySelected);
    }
}
I am writing my Hazelcast prototype code. I encountered the following error when running the reader. I am not sure what I missed.
SEVERE: [host1]:5701 [dev] [3.7.3] Service with name 'hz:impl:cacheService' not found!
com.hazelcast.core.HazelcastException: Service with name 'hz:impl:cacheService' not found!
at com.hazelcast.spi.impl.NodeEngineImpl.getService(NodeEngineImpl.java:350)
at com.hazelcast.spi.Operation.getService(Operation.java:239)
at com.hazelcast.cache.impl.operation.PostJoinCacheOperation.run(PostJoinCacheOperation.java:44)
at com.hazelcast.internal.cluster.impl.operations.PostJoinOperation.run(PostJoinOperation.java:93)
at com.hazelcast.spi.impl.operationservice.impl.OperationRunnerImpl.run(OperationRunnerImpl.java:181)
at com.hazelcast.spi.impl.operationexecutor.impl.OperationExecutorImpl.run(OperationExecutorImpl.java:375)
at com.hazelcast.spi.impl.operationservice.impl.OperationServiceImpl.run(OperationServiceImpl.java:267)
at com.hazelcast.spi.impl.operationservice.impl.OperationServiceImpl.runOperationOnCallingThread(OperationServiceImpl.java:262)
at com.hazelcast.internal.cluster.impl.operations.FinalizeJoinOperation.runPostJoinOp(FinalizeJoinOperation.java:139)
at com.hazelcast.internal.cluster.impl.operations.FinalizeJoinOperation.run(FinalizeJoinOperation.java:104)
at com.hazelcast.spi.impl.operationservice.impl.OperationRunnerImpl.run(OperationRunnerImpl.java:181)
at com.hazelcast.spi.impl.operationservice.impl.OperationRunnerImpl.run(OperationRunnerImpl.java:396)
at com.hazelcast.spi.impl.operationexecutor.impl.OperationThread.process(OperationThread.java:117)
at com.hazelcast.spi.impl.operationexecutor.impl.OperationThread.run(OperationThread.java:102)
Caused by: com.hazelcast.spi.exception.ServiceNotFoundException: Service with name 'hz:impl:cacheService' not found!
... 14 more
Here is my code:
public class Reader {
    public static void main(String[] args) throws InterruptedException {
        Config config = new Config();
        config.getNetworkConfig().getJoin().getTcpIpConfig().addMember("host1").setEnabled(true);
        config.getNetworkConfig().getJoin().getTcpIpConfig().addMember("host2").setEnabled(true);
        config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
        HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
        IMap<String, DataDetail> mapCustomers = hz.getMap("customers");
        while (true) {
            mapCustomers.lock("Joe");
            try {
                System.out.println("Joe(R) => " + mapCustomers.get("Joe").getTimeStamp());
                Thread.sleep(2000);
            } finally {
                mapCustomers.unlock("Joe");
            }
        }
    }
}

public class Writer {
    public static void main(String[] args) throws InterruptedException {
        Config config = new Config();
        config.getNetworkConfig().getJoin().getTcpIpConfig().addMember("host1").setEnabled(true);
        config.getNetworkConfig().getJoin().getTcpIpConfig().addMember("host2").setEnabled(true);
        config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
        HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
        IMap<String, DataDetail> mapCustomers = hz.getMap("customers");
        DataDetail od = new DataDetail();
        od.setDataId("X1");
        od.setTimeStamp(0);
        int i = 1;
        while (true) {
            mapCustomers.lock("Joe");
            try {
                mapCustomers.put("Joe", od);
                od.setTimeStamp(od.getTimeStamp() + 1);
                Thread.sleep(2000);
                DataDetail localOd = mapCustomers.get("Joe");
                System.out.println("Joe(W) => " + localOd.getTimeStamp());
            } finally {
                mapCustomers.unlock("Joe");
            }
        }
    }
}

public class DataDetail implements DataSerializable {
    String dataId;
    Integer timeStamp;

    public DataDetail() {
    }

    public void setDataId(String dataId) {
        this.dataId = dataId;
    }

    public String getDataId() {
        return this.dataId;
    }

    public void setTimeStamp(Integer timeStamp) {
        this.timeStamp = timeStamp;
    }

    public Integer getTimeStamp() {
        return this.timeStamp;
    }

    public void writeData(ObjectDataOutput out) throws IOException {
        out.writeUTF(dataId);
        out.writeInt(timeStamp);
    }

    public void readData(ObjectDataInput in) throws IOException {
        dataId = in.readUTF();
        timeStamp = in.readInt();
    }
}
You have to add the JCache API classes (JAR) to your classpath; otherwise the CacheService is not started.
If you want to use JCache to connect to Hazelcast servers, you have to add cache-api-1.1.0.jar to the lib directory and then include that JAR when exporting CLASSPATH in start.sh.
The original CLASSPATH export in start.sh:
export CLASSPATH="$HAZELCAST_HOME/lib/hazelcast-all-3.9.3.jar"
After the edit:
export CLASSPATH="$HAZELCAST_HOME/lib/hazelcast-all-3.9.3.jar:$HAZELCAST_HOME/lib/cache-api-1.1.0.jar:$CLASSPATH"
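If the reader and writer prototypes are built with Maven rather than started via start.sh, the equivalent fix (a sketch, using the same JCache API version as above) is adding the dependency to the pom:

<!-- javax.cache JCache API, pulled onto the application classpath -->
<dependency>
    <groupId>javax.cache</groupId>
    <artifactId>cache-api</artifactId>
    <version>1.1.0</version>
</dependency>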
I am creating a CMS for a site. There is an about page that needs to have content from the CMS. There needs to be only one document acting as a config file for the about page. My proposed solution for this is:
Create an about page model.
On save I will check to see if there is an existing document.
If there is an existing document, update that document. If there isn't save a new one.
Is there a better way to do this? Is there a way to do this in the save pre hook for my schema?
Something like this could be done for a singleton model:
HomePageSchema.statics = {
    getSingleton: function (cb) {
        this.findOne()
            .sort({updated: -1})
            .limit(1)
            .exec(function (error, model) {
                if (error) {
                    cb(error, null);
                } else if (model == null) {
                    cb(error, new HomePage());
                } else {
                    cb(error, model);
                }
            });
    },
};
and then
HomePage.getSingleton((err, homepage) => {
    homepage.image = '/images/myImage.jpg';
    homepage.headline = 'Homepage Headline';
    homepage.save();
});
Try MongoDB's findAndModify: as long as you use the same _id, the document will be updated; otherwise a new document will be created.
The same behavior is available in Mongoose through findOneAndUpdate.
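A minimal sketch of that approach, assuming a hypothetical AboutPage model (the model name and field values are illustrative, not from the question):

// Upsert the single about-page document: the empty filter matches the one
// existing document, and upsert: true creates it on the first save.
AboutPage.findOneAndUpdate(
    {},
    { headline: 'About us', body: 'Some CMS content' },
    { upsert: true, new: true, setDefaultsOnInsert: true },
    function (err, doc) {
        if (err) return console.error(err);
        console.log('Saved about page:', doc._id);
    }
);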
You can do that in three ways:
First, call findOne, then in the callback check whether the document exists; if it doesn't (document === null), call save.
Use the findOneAndUpdate method from Mongoose with the upsert option set to true.
Do it as you described, using a pre-save hook.
In that case I would prefer the first or second option. Hooks have some magic to them, which means that for someone looking at the code for the first time it will be a little harder to see what's going on :P But a hook could be the better solution if the same scenario (the document save action) is run in many places. It's a debatable issue; I've just put in my two cents...