I'm new to AWS, so maybe I'm missing something obvious; help is appreciated.
I have two versions of the code where the only difference is passing the bucket name as a 4-character string vs. a 5-character one, yet I get different responses from AWS.
Why is that?
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
s3.createBucket({Bucket: 'node4'}, function() {
  var params = {Bucket: 'node4', Key: 'myKey', Body: 'Hello!'};
  s3.putObject(params, function(err, data) {
    if (err)
      console.log(err)
    else
      console.log("Successfully uploaded data to myBucket/myKey");
  });
});
running the app.js:
➜ aws node app.js
{ [AllAccessDisabled: All access to this object has been disabled]
message: 'All access to this object has been disabled',
code: 'AllAccessDisabled',
region: null,
time: Fri Feb 05 2016 20:45:11 GMT+0200 (IST),
requestId: 'somerequestId',
extendedRequestId: 'someextendedRequestId',
statusCode: 403,
retryable: false,
retryDelay: 30 }
Second version:
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
s3.createBucket({Bucket: 'node4e'}, function() {
  var params = {Bucket: 'node4e', Key: 'myKey', Body: 'Hello!'};
  s3.putObject(params, function(err, data) {
    if (err)
      console.log(err)
    else
      console.log("Successfully uploaded data to myBucket/myKey");
  });
});
running the app.js:
➜ aws node app.js
Successfully uploaded data to myBucket/myKey
The only difference is passing the bucket name as a 4-character string vs. a 5-character one.
Actually, the only difference is that you don't have write access to the bucket named 'node4', but you do have access to a bucket named 'node4e'. Did you check whether both buckets were actually created successfully? I notice you aren't checking for errors in the createBucket() call, only in the putObject() call.
Those are fairly generic bucket names you are using; I wouldn't be surprised if the one that is failing, 'node4', is already in use by another AWS account.
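To see what is actually happening, a minimal sketch that checks the createBucket() result before attempting the upload (the bucket name is the one from the question):

var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.createBucket({Bucket: 'node4'}, function(err) {
  if (err) {
    // e.g. BucketAlreadyExists if another account already owns 'node4'
    return console.log('Error creating bucket:', err);
  }
  var params = {Bucket: 'node4', Key: 'myKey', Body: 'Hello!'};
  s3.putObject(params, function(err, data) {
    if (err) console.log(err);
    else console.log('Successfully uploaded data to node4/myKey');
  });
});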
I have an RDS SQL Server instance for which I need to get all successful/failed login attempts, create a report out of them (a CSV file), and upload it to an S3 bucket.
I am using the queries below to get both successful and failed logins:
-- Fetch failed login attempts
SELECT *
FROM msdb.dbo.rds_fn_get_audit_file ('D:\\rdsdbdata\\SQLAudit\\*.sqlaudit', default, default )
WHERE action_id = 'LGIF';
-- Fetch successful login attempts
SELECT *
FROM msdb.dbo.rds_fn_get_audit_file ('D:\\rdsdbdata\\SQLAudit\\*.sqlaudit', default, default)
WHERE action_id ='LGIS';
I need to do this through a Node.js-based Lambda function in AWS. How can I do it? I am totally new to Node.js and could not find any example.
You'll need to write a Lambda that can talk to SQL Server, REF: AWS Lambda NodeJS call to SQL Server returns no data and no errors
var sql = require("mssql");
// config for your database
var config = {
  user: 'xxuser',
  password: 'xxxx',
  server: 'mydns',
  database: 'tavier'
};
module.exports.rdsquery = async event => {
  console.log('called rdsquery')
  try {
    // connect to your database
    await sql.connect(config, function (err) {
      console.log('connected')
      if (err)
        console.log('rdsquery: '+err)
      // create Request object
      var request = new sql.Request();
      // query the database; a template literal keeps the multi-line SQL
      // (with its embedded single quotes) a valid JavaScript string
      request.query(`SELECT *
        FROM msdb.dbo.rds_fn_get_audit_file('D:\\rdsdbdata\\SQLAudit\\*.sqlaudit', default, default)
        WHERE action_id = 'LGIF'`, function (err, recordset) {
        if (err)
          console.log('rdsquery-sql: '+err)
        // send records as a response
        console.log('logging recordset')
        console.log(recordset);
        return {statusCode: 200, headers: {'Access-Control-Allow-Origin': '*'}, body: JSON.stringify(recordset)};
      });
    });
  }
  catch (e) {
    console.log('rdsquery-catch: '+e)
    return {statusCode: 200, headers: {'Access-Control-Allow-Origin': '*'}, body: JSON.stringify('ERR: '+e)};
  }
  //return {statusCode: 200, headers: {'Access-Control-Allow-Origin': '*'}, body: JSON.stringify('test here')};
};
Instead of returning the recordset, save it to S3, REF: https://stackoverflow.com/a/40188305/495455
var AWS = require('aws-sdk');
function putObjectToS3(bucket, key, data){
  var s3 = new AWS.S3();
  var params = {
    Bucket: bucket,
    Key: key,
    Body: data
  };
  s3.putObject(params, function(err, data) {
    if (err) console.log(err, err.stack); // an error occurred
    else console.log(data);               // successful response
  });
}
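Tying the two together, a rough sketch of the glue (the helper name, bucket, and key below are hypothetical; it assumes recordset is an array of plain row objects as returned by mssql):

// hypothetical helper: flatten rows into a CSV string
// (naive quoting via JSON.stringify, fine for a sketch)
function recordsetToCsv(rows) {
  if (!rows || !rows.length) return '';
  var headers = Object.keys(rows[0]);
  var lines = rows.map(function (row) {
    return headers.map(function (h) {
      return JSON.stringify(row[h] == null ? '' : row[h]);
    }).join(',');
  });
  return [headers.join(',')].concat(lines).join('\n');
}

// inside the query callback, instead of returning the recordset:
// putObjectToS3('my-report-bucket', 'reports/logins.csv', recordsetToCsv(recordset));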
I assume your file is generated on your own server, not in the cloud.
IMHO I don't see a reason to use a Lambda here, as the goal is just to upload a file to a location, and here is how it could be done without Lambda:
Lambda and your SQL Server will possibly not be in the same cloud.
It is extra effort to set up a Lambda and post data that is only being moved to S3.
What you need is:
1) a script to generate the report
2) a shell script upload-report-s3.sh that uses the AWS CLI to upload the generated file: aws s3 cp ./path-to-report-file.csv s3://your-bucket/extra-path/target-location.csv
3) a cron job to tie 1) and 2) together
Note: this assumes the S3 bucket is already created.
I've been pulling my hair out for two hours trying to find out what's going on. I'm getting an UnknownError when using the AWS JavaScript SDK to upload a file to AWS Glacier.
var AWS, buffer, config, glacier, params, vaultName;
AWS = require('aws-sdk');
AWS.config.update({
  accessKeyId: "KEY_ID",
  secretAccessKey: "SECRETE",
  region: "ap-southeast-2"
});
glacier = new AWS.Glacier();
vaultName = "arn:aws:glacier:ap-southeast-2:VAULT_ID:vaults/VAULT_NAME";
buffer = new Buffer(2.5 * 1024 * 1024);
params = {
  vaultName: vaultName,
  body: buffer
};
glacier.uploadArchive(params, function(err, data) {
  if (err) {
    return console.log("Error uploading archive!", err);
  }
});
The result in the console is below:
Error uploading archive! { [UnknownError: 400]
message: '400',
code: 'UnknownError',
statusCode: 400,
time: Mon May 04 2015 00:03:20 GMT+1000 (AEST),
retryable: false,
retryDelay: 30 }
I've tried:
Using root API keys and secret (to rule out permission issues)
Not setting the API keys, or setting wrong ones
Uploading the file from fs.readFile instead of using a Buffer
Including accountId: "-" in the params variable
All attempts result in the same error.
Answering my own question: the vaultName should just be the name instead of the whole ARN string.
The error message did not even say it!
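For reference, a minimal corrected sketch (region and vault name are placeholders; accountId: '-' means the account that owns the credentials):

var AWS = require('aws-sdk');
var glacier = new AWS.Glacier({region: 'ap-southeast-2'});

var params = {
  accountId: '-',           // '-' = the account of the caller's credentials
  vaultName: 'VAULT_NAME',  // just the vault's name, NOT its ARN
  body: Buffer.alloc(2.5 * 1024 * 1024)
};

glacier.uploadArchive(params, function(err, data) {
  if (err) console.log('Error uploading archive!', err);
  else console.log('Uploaded, archive ID:', data.archiveId);
});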
I'm trying to upload a PDF to an S3 bucket using the Knox library, but I keep getting 505 errors and the PDFs won't save. My code:
// all of this works well
var knox = require('knox');
var client = knox.createClient(require('../path/to/config.js').knox);
client.putFile('tmp/file', '/prefix/key',
  function(err, res) {
    if (err) {
      console.log("Error PUTing file in S3:", err);
    }
    console.log("S3 RESPONSE:", res.statusCode); // returns 505
  }
);
Anyone have any insight into what I'm doing wrong? I've also tried setting my own headers using client.put(..), but I got the same 505 response.
Two possible reasons:
1) If this is your complete code, then you forgot to enter the key, secret, and bucket.
var client = knox.createClient({
    key: '<api-key-here>'
  , secret: '<secret-here>'
  , bucket: 'learnboost'
});
2) There is a space in the file name that you are trying to upload.
This isn't an answer per se, and I'm still unsure about the 505 response above, but the AWS SDK that Amazon puts out works great if anyone is having similar issues with Knox. The above just becomes:
var aws = require('aws-sdk');
var fs = require('fs'); // needed for readFileSync below
aws.config.loadFromPath('./path/to/config.json');
var s3 = new aws.S3();
var params = { Bucket: 'your-bucket',
               Key: 'your-key',
               Body: fs.readFileSync('/path/to/file.pdf') };
s3.putObject(params, function(err, data) {
  if (err) {
    console.log("Error PUTing file:", err);
  }
  console.log("S3 RESPONSE:", data);
});
I'm trying to use the basic tutorial to create an S3 bucket as follows
var AWS = require('aws-sdk');
AWS.config.loadFromPath('./myawsconfig.json');
AWS.config.update({region: 'eu-west-1'});
var s3 = new AWS.S3();
s3.client.createBucket({Bucket: 'pBucket'}, function() {
  var data = {Bucket: 'pBucket', Key: 'myKey', Body: 'Hello!'};
  s3.client.putObject(data, function(err, data) {
    if (err) {
      console.log("Error uploading data: ", err);
    } else {
      console.log("Successfully uploaded data to myBucket/myKey");
    }
  });
});
But I'm receiving the following error
node createbucket.js
Error uploading data: { [NoSuchBucket: The specified bucket does not exist]
message: 'The specified bucket does not exist',
code: 'NoSuchBucket',
name: 'NoSuchBucket',
statusCode: 404,
retryable: false }
I just ran into this problem; apparently the Node.js tutorial code doesn't work as written. I got an error saying the object doesn't have a createBucket method.
This worked:
var AWS = require('aws-sdk');
AWS.config.loadFromPath('./credentials.json');
// Set your region for future requests.
AWS.config.update({region: 'us-east-1'});
// Create a bucket and put something in it.
var s3 = new AWS.S3();
s3.client.createBucket({Bucket: 'hackathon-test'}, function() {
  var data = {Bucket: 'hackathon-test', Key: 'myKey', Body: 'Hello!'};
  s3.client.putObject(data, function(err, data) {
    if (err) {
      console.log("Error uploading data: ", err);
    } else {
      console.log("Successfully uploaded data to myBucket/myKey");
    }
  });
});
I had this issue and discovered that my API user didn't have permission to create the bucket.
Slightly more thorough error checking revealed this:
s3.client.createBucket({Bucket: 'someBucket'}, function(err) {
  if (err) {
    console.log("Error creating bucket: ", err);
  } else {
    console.log("Successfully created bucket 'someBucket'");
  }
  // ...
});
According to the AWS S3 bucket name restrictions, your bucket name shouldn't contain any uppercase letters, so 'pBucket' is invalid.
http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
The rules for DNS-compliant bucket names are:
Bucket names must be at least 3 and no more than 63 characters long.
Bucket names must be a series of one or more labels. Adjacent labels are separated by a single period (.). Bucket names can contain lowercase letters, numbers, and hyphens. Each label must start and end with a lowercase letter or a number.
Bucket names must not be formatted as an IP address (e.g., 192.168.5.4).
When using virtual hosted-style buckets with SSL, the SSL wildcard certificate only matches buckets that do not contain periods. To work around this, use HTTP or write your own certificate verification logic.
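As a quick sanity check against these rules, a rough sketch (the regex covers the common cases above, not every edge case in the docs):

// rough client-side check for DNS-compliant bucket names (not exhaustive)
function looksLikeValidBucketName(name) {
  if (name.length < 3 || name.length > 63) return false;
  // labels of lowercase letters, numbers, and hyphens, separated by periods,
  // each starting and ending with a letter or number
  var label = '[a-z0-9]([a-z0-9-]*[a-z0-9])?';
  if (!new RegExp('^' + label + '(\\.' + label + ')*$').test(name)) return false;
  // must not be formatted as an IP address
  if (/^\d+\.\d+\.\d+\.\d+$/.test(name)) return false;
  return true;
}

console.log(looksLikeValidBucketName('pBucket'));  // false (uppercase letter)
console.log(looksLikeValidBucketName('p-bucket')); // true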
A couple of pointers that I missed and someone may find useful:
If you set the region as part of the S3 object, var s3 = new AWS.S3({region: 'us-west-1'});, then the call will fail (in my experience).
You can therefore set the region via either:
a) AWS.config.update({ region: 'eu-west-1' });
b) as part of the params on createBucket
s3.createBucket({
  Bucket: bucketName,
  CreateBucketConfiguration: {
    LocationConstraint: "eu-west-1"
  }
}, function (err, data) {
  // ...
});
Also, watch out for capitals or underscores in the bucket name, as that took an hour of my life too (names must be DNS-compliant).
I've tried using aws-sdk and Knox, and I get status code 301 when trying to upload images: 'The bucket you are attempting to access must be addressed using the specified endpoint. Please send all future requests to this endpoint.' This works in PHP.
AWS.config.loadFromPath(__dirname + '/config/config.json');
fs.readFile(source, function (err, data) {
  var s3 = new AWS.S3();
  s3.client.createBucket({Bucket: 'mystuff'}, function() {
    var d = {
      Bucket: 'mystuff',
      Key: 'img/test.jpg',
      Body: data,
      ACL: 'public-read'
    };
    s3.client.putObject(d, function(err, res) {
      if (err) {
        console.log("Error uploading data: ", err);
        callback(err);
      } else {
        console.log("Successfully uploaded data to myBucket/myKey");
        callback(res);
      }
    });
  });
});
I actually solved this problem. In your config you have to have a region; since my bucket was "US Standard", I left my region blank and it worked.
config.json -
{ "accessKeyId": "yourAccessKey", "secretAccessKey": "yourSecretKey", "region": "" }
Go to the S3 management console, select one of your files, and click on Properties -> look at the file link.
US Standard:
URL: https://s3.amazonaws.com/yourbucket/
Host in your console window: yourbucket.s3.amazonaws.com/
us-west-1:
URL: https://s3-us-west-1.amazonaws.com/yourbucket/
Host in your console window: yourbucket.s3-us-west-1.amazonaws.com/
Did you try .send()?
I can upload to S3 with the code below.
http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/AWSRequest.html
var s3object = {Bucket: 'mystuff', Key: name, Body: data['data']};
s3.client.putObject(s3object).done(function(resp){
  console.log("Successfully uploaded data");
}).fail(function(resp){
  console.log(resp);
}).send();
I had the same problem with the new SDK and solved it by setting the endpoint option explicitly.
Reference : http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#constructor_details
Snippet:
var AWS = require('aws-sdk');
var s3 = new AWS.S3({ endpoint: 'https://s3-your-region-varies.amazonaws.com' }),
    myBucket = 'your-bucket-name';
var params = {Bucket: myBucket, Key: 'myUpload', Body: "Test"};
s3.putObject(params, function(err, data) {
  if (err) {
    console.log(err)
  } else {
    console.log("Successfully uploaded data to "+myBucket+"/myUpload");
  }
});
Alternatively, you can solve this by setting the region in your config file; you just have to be precise about your region name.
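For example, a minimal sketch of the config-file approach (the file name, bucket, and region are placeholders; the keys follow the format AWS.config.loadFromPath expects):

// config.json (placeholder values):
// { "accessKeyId": "AKIA...", "secretAccessKey": "...", "region": "eu-west-1" }
var AWS = require('aws-sdk');
AWS.config.loadFromPath('./config.json'); // region comes from the file, no explicit endpoint needed
var s3 = new AWS.S3();

s3.putObject({Bucket: 'your-bucket-name', Key: 'myUpload', Body: 'Test'}, function(err, data) {
  if (err) console.log(err);
  else console.log('Uploaded using the region taken from config.json');
});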